From bc89e777e4e0d52e91cdcb22f1234ab67786a943 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sat, 28 Jun 2025 19:29:18 -0700
Subject: [PATCH 1/5] fix: Update v0 server code extraction to handle multiple
 files and markdown artifacts

---
 .../example_structured_args.md                |   0
 {.claude/commands => .archive}/mcp_add.md     |   0
 .../mcp_build_advanced.md                     |   0
 {.claude/commands => .archive}/mcp_client.md  |   0
 {.claude/commands => .archive}/mcp_convert.md |   0
 {.claude/commands => .archive}/test.md        |   0
 .../commands => .archive}/todo_protocol.md    |   0
 .claude/{commands => }/DB-QUICK-REFERENCE.md  |   0
 .claude/commands/todo_guide                   |  61 ++
 .claude/{commands => }/mcp_client_advanced.md |   0
 .claude/{commands => }/project_kickoff_pro.md |   0
 .env.example                                  |  91 +-
 components/ButtonDemo.tsx                     | 286 ------
 .../http/vercel-v0-mcp/components.json        |  17 +
 mcp-servers/http/vercel-v0-mcp/lib/utils.ts   |   6 +
 mcp-servers/http/vercel-v0-mcp/package.json   |  32 +
 .../http/vercel-v0-mcp/postcss.config.js      |   6 +
 .../vercel-v0-mcp/src/vercel_v0_server.py     | 120 ++-
 .../http/vercel-v0-mcp/tailwind.config.js     |  77 ++
 mcp-servers/http/vercel-v0-mcp/tsconfig.json  |  41 +
 .../src/anthropic_comprehensive_server.py     | 560 +++++++++++
 servers/github-http-mcp/src/github_server.py |  68 +-
 .../src/openai_tools_server.py                | 406 ++++++++
 .../routing-http-mcp/src/routing_server.py    | 932 ++++++++++++++++++
 servers/start_all_http_servers.sh             | 112 +++
 .../supabase-http-mcp/src/supabase_server.py  | 108 +-
 26 files changed, 2525 insertions(+), 398 deletions(-)
 rename {.claude/commands => .archive}/example_structured_args.md (100%)
 rename {.claude/commands => .archive}/mcp_add.md (100%)
 rename {.claude/commands => .archive}/mcp_build_advanced.md (100%)
 rename {.claude/commands => .archive}/mcp_client.md (100%)
 rename {.claude/commands => .archive}/mcp_convert.md (100%)
 rename {.claude/commands => .archive}/test.md (100%)
 rename {.claude/commands => .archive}/todo_protocol.md (100%)
 rename .claude/{commands => }/DB-QUICK-REFERENCE.md (100%)
 create mode 100644 .claude/commands/todo_guide
 rename .claude/{commands => }/mcp_client_advanced.md (100%)
 rename .claude/{commands => }/project_kickoff_pro.md (100%)
 delete mode 100644 components/ButtonDemo.tsx
 create mode 100644 mcp-servers/http/vercel-v0-mcp/components.json
 create mode 100644 mcp-servers/http/vercel-v0-mcp/lib/utils.ts
 create mode 100644 mcp-servers/http/vercel-v0-mcp/package.json
 create mode 100644 mcp-servers/http/vercel-v0-mcp/postcss.config.js
 create mode 100644 mcp-servers/http/vercel-v0-mcp/tailwind.config.js
 create mode 100644 mcp-servers/http/vercel-v0-mcp/tsconfig.json
 create mode 100644 servers/anthropic-comprehensive-http-mcp/src/anthropic_comprehensive_server.py
 create mode 100644 servers/openai-tools-http-mcp/src/openai_tools_server.py
 create mode 100644 servers/routing-http-mcp/src/routing_server.py
 create mode 100755 servers/start_all_http_servers.sh

diff --git a/.claude/commands/example_structured_args.md b/.archive/example_structured_args.md
similarity index 100%
rename from .claude/commands/example_structured_args.md
rename to .archive/example_structured_args.md
diff --git a/.claude/commands/mcp_add.md b/.archive/mcp_add.md
similarity index 100%
rename from .claude/commands/mcp_add.md
rename to .archive/mcp_add.md
diff --git a/.claude/commands/mcp_build_advanced.md b/.archive/mcp_build_advanced.md
similarity index 100%
rename from .claude/commands/mcp_build_advanced.md
rename to .archive/mcp_build_advanced.md
diff --git a/.claude/commands/mcp_client.md b/.archive/mcp_client.md
similarity index 100%
rename from .claude/commands/mcp_client.md
rename to .archive/mcp_client.md
diff --git a/.claude/commands/mcp_convert.md b/.archive/mcp_convert.md
similarity index 100%
rename from .claude/commands/mcp_convert.md
rename to .archive/mcp_convert.md
diff --git a/.claude/commands/test.md b/.archive/test.md
similarity index 100%
rename from .claude/commands/test.md
rename to .archive/test.md
diff --git a/.claude/commands/todo_protocol.md b/.archive/todo_protocol.md
similarity index 100%
rename from .claude/commands/todo_protocol.md
rename to .archive/todo_protocol.md
diff --git a/.claude/commands/DB-QUICK-REFERENCE.md b/.claude/DB-QUICK-REFERENCE.md
similarity index 100%
rename from .claude/commands/DB-QUICK-REFERENCE.md
rename to .claude/DB-QUICK-REFERENCE.md
diff --git a/.claude/commands/todo_guide b/.claude/commands/todo_guide
new file mode 100644
index 0000000..fa4a4bc
--- /dev/null
+++ b/.claude/commands/todo_guide
@@ -0,0 +1,61 @@
+# Claude Code Todo System Guide
+
+## How Todos Actually Work
+
+The todo system in Claude Code stores todo lists as JSON files with UUID names in:
+```
+/home/gotime2022/.claude/todos/
+```
+
+Each session gets a unique UUID, for example:
+- `/home/gotime2022/.claude/todos/a64ca605-74cc-4915-afa3-19ac8c315b41.json`
+
+## The Problem
+
+1. Each new Claude Code session creates a NEW todo file
+2. Previous session todos are saved but not loaded
+3. This creates the illusion that todos don't persist
+
+## The Solution
+
+To access todos from previous sessions:
+
+1. List recent todo files:
+```bash
+ls -t /home/gotime2022/.claude/todos/*.json | head -10
+```
+
+2. Read a specific todo file:
+```
+Read(/home/gotime2022/.claude/todos/[UUID].json)
+```
+
+3. Find non-empty todo files:
+```bash
+for f in $(ls -t /home/gotime2022/.claude/todos/*.json | head -20); do
+  size=$(wc -c < "$f")
+  if [ $size -gt 10 ]; then
+    echo "File: $f (size: $size)"
+    cat "$f" | head -5
+  fi
+done
+```
+
+## Permissions Required
+
+Add these to `.claude/settings.json`:
+```json
+"Read(/home/gotime2022/.claude/todos/*.json)",
+"TodoRead()",
+"TodoWrite(*)"
+```
+
+## Current Session Todo Commands
+
+- `TodoRead()` - Read current session's todos
+- `TodoWrite(todos)` - Update current session's todos
+- `/todo` - Display todos in interactive mode
+
+## The Real Issue
+
+The todo system works but lacks session continuity. Each session is isolated, which breaks the workflow when you need to continue work from a previous session.
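+
+## Appendix: Reading Todos From a Script
+
+The shell loop above can also be done from a script. A minimal sketch, assuming
+the same todos directory and that each file holds a JSON array of items with
+`content` and `status` fields (the field names are an assumption):
+
+```python
+import json
+from pathlib import Path
+
+todo_dir = Path.home() / ".claude" / "todos"
+# Newest files first, mirroring `ls -t`
+recent = sorted(todo_dir.glob("*.json"),
+                key=lambda p: p.stat().st_mtime, reverse=True)[:20]
+for path in recent:
+    items = json.loads(path.read_text() or "[]")
+    if items:  # skip empty sessions
+        print(path.name)
+        for item in items:
+            print(f"  [{item.get('status', '?')}] {item.get('content', '')}")
+```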
\ No newline at end of file diff --git a/.claude/commands/mcp_client_advanced.md b/.claude/mcp_client_advanced.md similarity index 100% rename from .claude/commands/mcp_client_advanced.md rename to .claude/mcp_client_advanced.md diff --git a/.claude/commands/project_kickoff_pro.md b/.claude/project_kickoff_pro.md similarity index 100% rename from .claude/commands/project_kickoff_pro.md rename to .claude/project_kickoff_pro.md diff --git a/.env.example b/.env.example index 761dd35..8b998a2 100644 --- a/.env.example +++ b/.env.example @@ -1,46 +1,67 @@ -# DevLoopAI Environment Variables +# MCP Server Environment Variables # Copy this file to .env and fill in your actual values -# NEVER commit the .env file to version control -# API Keys - REQUIRED -ANTHROPIC_API_KEY=your_anthropic_api_key_here -OPENAI_API_KEY=your_openai_api_key_here -GEMINI_API_KEY=your_gemini_api_key_here +# GitHub MCP Server +GITHUB_TOKEN=your_github_personal_access_token -# Supabase Configuration - REQUIRED -SUPABASE_URL=your_supabase_project_url -SUPABASE_ANON_KEY=your_supabase_anon_key -SUPABASE_SERVICE_ROLE_KEY=your_supabase_service_role_key +# OpenAI Tools MCP Server +OPENAI_API_KEY=your_openai_api_key -# GitHub Configuration - OPTIONAL -GITHUB_TOKEN=your_github_personal_access_token -GITHUB_ORG=your_github_organization +# Anthropic Comprehensive MCP Server +ANTHROPIC_API_KEY=your_anthropic_api_key -# Slack Configuration - OPTIONAL -SLACK_BOT_TOKEN=your_slack_bot_token -SLACK_APP_TOKEN=your_slack_app_token +# Supabase MCP Server +SUPABASE_URL=https://your-project.supabase.co +SUPABASE_SERVICE_KEY=your_supabase_service_key +SUPABASE_ACCESS_TOKEN=your_supabase_access_token + +# Vercel v0 MCP Server (for AI component generation) +V0_API_KEY=your_v0_api_key -# Redis Configuration - OPTIONAL -REDIS_URL=redis://localhost:6379 +# Vercel Deploy MCP Server (for deployments) +VERCEL_TOKEN=your_vercel_deploy_token + +# Gemini MCP Server +GEMINI_API_KEY=your_gemini_api_key + +# Slack MCP Server +SLACK_BOT_TOKEN=your_slack_bot_token +SLACK_USER_TOKEN=your_slack_user_token -# Application Configuration -NODE_ENV=development -NEXT_PUBLIC_API_URL=http://localhost:8080 -API_PORT=8080 -FRONTEND_PORT=3001 +# Redis MCP Server +REDIS_HOST=localhost +REDIS_PORT=6379 +REDIS_PASSWORD=optional_redis_password -# Security -JWT_SECRET=generate_a_strong_random_string_here -SESSION_SECRET=generate_another_strong_random_string_here +# Context7 MCP Server +CONTEXT7_API_KEY=your_context7_api_key -# Feature Flags -ENABLE_MCP_SERVERS=true -ENABLE_PARALLEL_EXECUTION=true -ENABLE_AUTO_SAVE=true +# Browserbase MCP Server +BROWSERBASE_API_KEY=your_browserbase_api_key +BROWSERBASE_PROJECT_ID=your_browserbase_project_id -# Logging -LOG_LEVEL=info -LOG_FORMAT=json +# Hostinger MCP Server +HOSTINGER_API_KEY=your_hostinger_api_key -# Database -DATABASE_URL=postgresql://user:password@localhost:5432/devloop \ No newline at end of file +# MCP Server Ports (defaults shown) +BRAVE_SEARCH_MCP_PORT=8003 +FILESYSTEM_MCP_PORT=8006 +MEMORY_MCP_PORT=8007 +VERCEL_V0_MCP_PORT=8010 +GITHUB_MCP_PORT=8011 +OPENAI_TOOLS_MCP_PORT=8012 +SUPABASE_MCP_PORT=8013 +GEMINI_MCP_PORT=8014 +ANTHROPIC_MCP_PORT=8015 +SEQUENTIAL_THINKING_MCP_PORT=8016 +SLACK_MCP_PORT=8017 +REDIS_MCP_PORT=8018 +CONTEXT7_MCP_PORT=8019 +DOCKER_MCP_PORT=8020 +EVERYTHING_MCP_PORT=8021 +FETCH_MCP_PORT=8022 +BROWSERBASE_MCP_PORT=8023 +HOSTINGER_MCP_PORT=8024 +VERCEL_DEPLOY_MCP_PORT=8025 +ROUTING_MCP_PORT=8026 +EOF < /dev/null diff --git a/components/ButtonDemo.tsx b/components/ButtonDemo.tsx deleted file mode 
100644 index d5cb74f..0000000 --- a/components/ButtonDemo.tsx +++ /dev/null @@ -1,286 +0,0 @@ - create a modern React button component with hover effects and click animations using TypeScript and Tailwind CSS (which shadcn/ui uses). Here's a production-ready implementation: - -```tsx file="components/animated-button.tsx" -'use client' - -import React, { forwardRef, useState } from 'react' -import { cn } from '@/lib/utils' - -// Define button variants for different styles -const buttonVariants = { - default: 'bg-primary text-primary-foreground hover:bg-primary/90', - destructive: 'bg-destructive text-destructive-foreground hover:bg-destructive/90', - outline: 'border border-input bg-background hover:bg-accent hover:text-accent-foreground', - secondary: 'bg-secondary text-secondary-foreground hover:bg-secondary/80', - ghost: 'hover:bg-accent hover:text-accent-foreground', - link: 'text-primary underline-offset-4 hover:underline' -} - -// Define button sizes -const buttonSizes = { - default: 'h-10 px-4 py-2', - sm: 'h-9 rounded-md px-3', - lg: 'h-11 rounded-md px-8', - icon: 'h-10 w-10' -} - -// TypeScript interface for button props -interface AnimatedButtonProps extends React.ButtonHTMLAttributes { - variant?: keyof typeof buttonVariants - size?: keyof typeof buttonSizes - asChild?: boolean - children: React.ReactNode - loading?: boolean -} - -/** - * AnimatedButton Component - * - * A modern button component with hover effects and click animations. - * Includes accessibility features and responsive design. - */ -const AnimatedButton = forwardRef( - ({ - className, - variant = 'default', - size = 'default', - children, - loading = false, - disabled, - onClick, - ...props - }, ref) => { - // State to track if button is being pressed for click animation - const [isPressed, setIsPressed] = useState(false) - - /** - * Handle mouse down event for click animation - * Creates a pressed state for visual feedback - */ - const handleMouseDown = () => { - setIsPressed(true) - } - - /** - * Handle mouse up event to reset pressed state - */ - const handleMouseUp = () => { - setIsPressed(false) - } - - /** - * Handle mouse leave to ensure pressed state is reset - * if user drags mouse away while pressing - */ - const handleMouseLeave = () => { - setIsPressed(false) - } - - /** - * Enhanced click handler that includes haptic feedback simulation - */ - const handleClick = (event: React.MouseEvent) => { - // Reset pressed state - setIsPressed(false) - - // Call original onClick if provided and not disabled/loading - if (onClick && !disabled && !loading) { - onClick(event) - } - } - - return ( - - ) - } -) - -// Display name for debugging -AnimatedButton.displayName = 'AnimatedButton' - -export default AnimatedButton -``` - -```tsx file="components/button-demo.tsx" -'use client' - -import AnimatedButton from './animated-button' -import { useState } from 'react' - -/** - * Demo component showcasing different button variants and states - */ -export default function ButtonDemo() { - const [loading, setLoading] = useState(false) - - const handleAsyncAction = async () => { - setLoading(true) - // Simulate async operation - await new Promise(resolve => setTimeout(resolve, 2000)) - setLoading(false) - } - - return ( -
-    <div className="p-8 max-w-4xl mx-auto space-y-8">
-      <div>
-        <h2 className="text-xl font-semibold mb-4">
-          Animated Button Variants
-        </h2>
-
-        {/* Different variants */}
-        <div className="flex flex-wrap gap-4">
-          <AnimatedButton variant="default">
-            Default Button
-          </AnimatedButton>
-
-          <AnimatedButton variant="destructive">
-            Destructive
-          </AnimatedButton>
-
-          <AnimatedButton variant="outline">
-            Outline
-          </AnimatedButton>
-
-          <AnimatedButton variant="secondary">
-            Secondary
-          </AnimatedButton>
-
-          <AnimatedButton variant="ghost">
-            Ghost
-          </AnimatedButton>
-
-          <AnimatedButton variant="link">
-            Link Button
-          </AnimatedButton>
-        </div>
-      </div>
-
-      <div>
-        <h2 className="text-xl font-semibold mb-4">
-          Different Sizes
-        </h2>
-
-        <div className="flex flex-wrap items-center gap-4">
-          <AnimatedButton size="sm">Small</AnimatedButton>
-          <AnimatedButton size="default">Default</AnimatedButton>
-          <AnimatedButton size="lg">Large</AnimatedButton>
-        </div>
-      </div>
-
-      <div>
-        <h2 className="text-xl font-semibold mb-4">
-          States
-        </h2>
-
-        <div className="flex flex-wrap gap-4">
-          <AnimatedButton disabled>
-            Disabled
-          </AnimatedButton>
-
-          <AnimatedButton loading={loading} onClick={handleAsyncAction}>
-            {loading ? 'Loading...' : 'Click for Loading'}
-          </AnimatedButton>
-        </div>
-      </div>
-
-      <div>
-        <h2 className="text-xl font-semibold mb-4">
-          Interactive Examples
-        </h2>
-
-        <div className="flex flex-wrap gap-4">
-          <AnimatedButton
-            onClick={() => alert('Button clicked!')}
-            className="w-full sm:w-auto"
-          >
-            Click Me!
-          </AnimatedButton>
-
-          <AnimatedButton
-            onClick={() => console.log('Logged to console')}
-            className="w-full sm:w-auto"
-          >
-            Log to Console
-          </AnimatedButton>
-        </div>
-      </div>
-    </div>
- ) -} -``` - -```ts file="lib/utils.ts" -import { type ClassValue, clsx } from 'clsx' -import { twMerge } from 'tailwind-merge' - -/** - * Utility function to merge Tailwind CSS classes - * Combines clsx for conditional classes and tailwind-merge for deduplication - */ -export function cn(...inputs: ClassValue[]) { - return twMerge(clsx(inputs)) -} -``` - diff --git a/mcp-servers/http/vercel-v0-mcp/components.json b/mcp-servers/http/vercel-v0-mcp/components.json new file mode 100644 index 0000000..811d2db --- /dev/null +++ b/mcp-servers/http/vercel-v0-mcp/components.json @@ -0,0 +1,17 @@ +{ + "$schema": "https://ui.shadcn.com/schema.json", + "style": "default", + "rsc": true, + "tsx": true, + "tailwind": { + "config": "tailwind.config.js", + "css": "app/globals.css", + "baseColor": "slate", + "cssVariables": true, + "prefix": "" + }, + "aliases": { + "components": "@/components", + "utils": "@/lib/utils" + } +} \ No newline at end of file diff --git a/mcp-servers/http/vercel-v0-mcp/lib/utils.ts b/mcp-servers/http/vercel-v0-mcp/lib/utils.ts new file mode 100644 index 0000000..d084cca --- /dev/null +++ b/mcp-servers/http/vercel-v0-mcp/lib/utils.ts @@ -0,0 +1,6 @@ +import { type ClassValue, clsx } from "clsx" +import { twMerge } from "tailwind-merge" + +export function cn(...inputs: ClassValue[]) { + return twMerge(clsx(inputs)) +} diff --git a/mcp-servers/http/vercel-v0-mcp/package.json b/mcp-servers/http/vercel-v0-mcp/package.json new file mode 100644 index 0000000..d528b46 --- /dev/null +++ b/mcp-servers/http/vercel-v0-mcp/package.json @@ -0,0 +1,32 @@ +{ + "name": "v0-generated-project", + "version": "0.1.0", + "private": true, + "scripts": { + "dev": "next dev", + "build": "next build", + "start": "next start", + "lint": "next lint" + }, + "dependencies": { + "next": "14.2.3", + "react": "^18.3.1", + "react-dom": "^18.3.1", + "@radix-ui/react-slot": "^1.0.2", + "class-variance-authority": "^0.7.0", + "clsx": "^2.1.1", + "tailwind-merge": "^2.3.0", + "lucide-react": "^0.394.0" + }, + "devDependencies": { + "@types/node": "^20.14.0", + "@types/react": "^18.3.3", + "@types/react-dom": "^18.3.0", + "autoprefixer": "^10.4.19", + "eslint": "^8.57.0", + "eslint-config-next": "14.2.3", + "postcss": "^8.4.38", + "tailwindcss": "^3.4.4", + "typescript": "^5.4.5" + } +} \ No newline at end of file diff --git a/mcp-servers/http/vercel-v0-mcp/postcss.config.js b/mcp-servers/http/vercel-v0-mcp/postcss.config.js new file mode 100644 index 0000000..33ad091 --- /dev/null +++ b/mcp-servers/http/vercel-v0-mcp/postcss.config.js @@ -0,0 +1,6 @@ +module.exports = { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +} diff --git a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py index 69a365b..20fa838 100644 --- a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py +++ b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py @@ -164,43 +164,102 @@ def _parse_generated_code(self, code: str) -> List[Dict[str, str]]: logger.debug(f"Raw v0 response first 500 chars: {code[:500]}") # First, try to extract code from markdown code blocks - # Pattern for ```language\ncode\n``` - code_block_regex = r'```(?:tsx?|jsx?|typescript|javascript)?\n([\s\S]*?)```' - code_blocks = re.findall(code_block_regex, code) + # Updated pattern to handle file markers in code blocks + # Pattern: ```language file="path/to/file.ext"\ncode\n``` + code_block_with_file_regex = r'```(?:tsx?|jsx?|typescript|javascript|ts|js)?\s*(?:file="([^"]+)")?\n([\s\S]*?)```' + matches = 
re.finditer(code_block_with_file_regex, code) + code_blocks = [] + for match in matches: + file_path = match.group(1) # May be None if no file attribute + code_content = match.group(2) + + if file_path: + # Extract just the filename from the path + filename = file_path.split('/')[-1] + files.append({ + "name": filename, + "content": code_content.rstrip('\n'), + "path": file_path + }) + logger.info(f"Found code block with file marker: {file_path}") + else: + code_blocks.append(code_content) + + # If we found files with paths, return them + if files: + logger.info(f"Found {len(files)} files with explicit paths") + return files + + # If we have code blocks without file markers if code_blocks: - logger.info(f"Found {len(code_blocks)} code blocks in v0 response") - # If we found code blocks, use them instead of the raw response - # Join all code blocks if multiple + logger.info(f"Found {len(code_blocks)} code blocks without file markers") extracted_code = '\n\n'.join(code_blocks) else: - logger.info("No code blocks found, using raw response") - extracted_code = code - - # Check for multiple file markers - file_regex = r'// File: (.+)\n([\s\S]*?)(?=// File:|$)' - matches = re.finditer(file_regex, extracted_code) - - file_count = 0 - for match in matches: - file_count += 1 - # IMPORTANT: Do not strip() the content to preserve indentation - files.append({ - "name": match.group(1).strip(), - "content": match.group(2).rstrip('\n') # Only remove trailing newlines - }) - - logger.info(f"Found {file_count} file markers in code") + logger.info("No code blocks found, checking for raw code") + # Check if the response contains code without markdown blocks + # Skip any leading explanation text + lines = code.split('\n') + code_start = 0 + for i, line in enumerate(lines): + # Look for typical code start patterns + if (line.strip().startswith(('import ', 'export ', 'const ', 'function ', 'class ', '//')) or + line.strip() == '' and i > 0 and lines[i-1].strip() != ''): + code_start = i + break + + if code_start > 0: + extracted_code = '\n'.join(lines[code_start:]) + logger.info(f"Extracted code starting from line {code_start}") + else: + extracted_code = code + + # Check for multiple file markers in the extracted code + # Pattern: // File: path/to/file.ext + file_regex = r'^// File: (.+)$' + lines = extracted_code.split('\n') + current_file = None + current_content = [] + + for line in lines: + file_match = re.match(file_regex, line) + if file_match: + # Save previous file if exists + if current_file and current_content: + content = '\n'.join(current_content).rstrip('\n') + if content: # Only add if there's actual content + files.append({ + "name": current_file.split('/')[-1], + "content": content, + "path": current_file + }) + # Start new file + current_file = file_match.group(1).strip() + current_content = [] + logger.info(f"Found file marker: {current_file}") + elif current_file is not None: + current_content.append(line) + + # Save the last file + if current_file and current_content: + content = '\n'.join(current_content).rstrip('\n') + if content: + files.append({ + "name": current_file.split('/')[-1], + "content": content, + "path": current_file + }) - # If no file markers, treat entire code as single component - if not files: + # If no file markers found, treat entire code as single component + if not files and extracted_code.strip(): # Try to extract component name from code component_match = re.search(r'export (?:default )?(?:function|const) (\w+)', extracted_code) component_name = 
component_match.group(1) if component_match else 'Component' files.append({ "name": f"{component_name}.tsx", - "content": extracted_code.rstrip('\n') # Only remove trailing newlines + "content": extracted_code.rstrip('\n'), + "path": f"components/{component_name}.tsx" }) logger.info(f"Created single file: {component_name}.tsx") @@ -467,12 +526,19 @@ def create_component_files(self, files: List[Dict[str, str]], target_dir: str = for file_info in files: file_name = file_info.get("name", "Component.tsx") content = file_info.get("content", "") + file_path_from_info = file_info.get("path", None) # Ensure proper file extension if not file_name.endswith(('.tsx', '.jsx', '.ts', '.js')): file_name += '.tsx' - file_path = self.project_path / target_dir / file_name + # Use the path from file_info if provided, otherwise use target_dir + if file_path_from_info: + # Handle paths that might start with / or ./ + clean_path = file_path_from_info.lstrip('./') + file_path = self.project_path / clean_path + else: + file_path = self.project_path / target_dir / file_name try: # Create directory if it doesn't exist diff --git a/mcp-servers/http/vercel-v0-mcp/tailwind.config.js b/mcp-servers/http/vercel-v0-mcp/tailwind.config.js new file mode 100644 index 0000000..7cb7e37 --- /dev/null +++ b/mcp-servers/http/vercel-v0-mcp/tailwind.config.js @@ -0,0 +1,77 @@ +/** @type {import('tailwindcss').Config} */ +module.exports = { + darkMode: ["class"], + content: [ + './pages/**/*.{ts,tsx}', + './components/**/*.{ts,tsx}', + './app/**/*.{ts,tsx}', + './src/**/*.{ts,tsx}', + ], + prefix: "", + theme: { + container: { + center: true, + padding: "2rem", + screens: { + "2xl": "1400px", + }, + }, + extend: { + colors: { + border: "hsl(var(--border))", + input: "hsl(var(--input))", + ring: "hsl(var(--ring))", + background: "hsl(var(--background))", + foreground: "hsl(var(--foreground))", + primary: { + DEFAULT: "hsl(var(--primary))", + foreground: "hsl(var(--primary-foreground))", + }, + secondary: { + DEFAULT: "hsl(var(--secondary))", + foreground: "hsl(var(--secondary-foreground))", + }, + destructive: { + DEFAULT: "hsl(var(--destructive))", + foreground: "hsl(var(--destructive-foreground))", + }, + muted: { + DEFAULT: "hsl(var(--muted))", + foreground: "hsl(var(--muted-foreground))", + }, + accent: { + DEFAULT: "hsl(var(--accent))", + foreground: "hsl(var(--accent-foreground))", + }, + popover: { + DEFAULT: "hsl(var(--popover))", + foreground: "hsl(var(--popover-foreground))", + }, + card: { + DEFAULT: "hsl(var(--card))", + foreground: "hsl(var(--card-foreground))", + }, + }, + borderRadius: { + lg: "var(--radius)", + md: "calc(var(--radius) - 2px)", + sm: "calc(var(--radius) - 4px)", + }, + keyframes: { + "accordion-down": { + from: { height: "0" }, + to: { height: "var(--radix-accordion-content-height)" }, + }, + "accordion-up": { + from: { height: "var(--radix-accordion-content-height)" }, + to: { height: "0" }, + }, + }, + animation: { + "accordion-down": "accordion-down 0.2s ease-out", + "accordion-up": "accordion-up 0.2s ease-out", + }, + }, + }, + plugins: [require("tailwindcss-animate")], +} \ No newline at end of file diff --git a/mcp-servers/http/vercel-v0-mcp/tsconfig.json b/mcp-servers/http/vercel-v0-mcp/tsconfig.json new file mode 100644 index 0000000..0f30dc5 --- /dev/null +++ b/mcp-servers/http/vercel-v0-mcp/tsconfig.json @@ -0,0 +1,41 @@ +{ + "compilerOptions": { + "target": "ES2017", + "lib": [ + "dom", + "dom.iterable", + "esnext" + ], + "allowJs": true, + "skipLibCheck": true, + "strict": true, + 
"forceConsistentCasingInFileNames": true, + "noEmit": true, + "esModuleInterop": true, + "module": "esnext", + "moduleResolution": "bundler", + "resolveJsonModule": true, + "isolatedModules": true, + "jsx": "preserve", + "incremental": true, + "plugins": [ + { + "name": "next" + } + ], + "paths": { + "@/*": [ + "./*" + ] + } + }, + "include": [ + "next-env.d.ts", + "**/*.ts", + "**/*.tsx", + ".next/types/**/*.ts" + ], + "exclude": [ + "node_modules" + ] +} \ No newline at end of file diff --git a/servers/anthropic-comprehensive-http-mcp/src/anthropic_comprehensive_server.py b/servers/anthropic-comprehensive-http-mcp/src/anthropic_comprehensive_server.py new file mode 100644 index 0000000..0d502f4 --- /dev/null +++ b/servers/anthropic-comprehensive-http-mcp/src/anthropic_comprehensive_server.py @@ -0,0 +1,560 @@ +#!/usr/bin/env python3 +""" +Anthropic Comprehensive MCP Server - HTTP Version +Provides essential Anthropic Claude API tools for chat, vision, computer use, and more +""" + +import os +import logging +from typing import List, Dict, Any, Optional, AsyncIterator +import base64 +import json +from datetime import datetime + +from fastmcp import FastMCP +import anthropic +from anthropic import AsyncAnthropic +from anthropic.types import Message, ContentBlock, TextBlock, ToolUseBlock + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Initialize MCP server +mcp = FastMCP("anthropic-comprehensive") + +# Initialize Anthropic client +api_key = os.getenv('ANTHROPIC_API_KEY') +if not api_key: + raise ValueError("ANTHROPIC_API_KEY environment variable is required") + +client = AsyncAnthropic(api_key=api_key) + +# Core Chat Operations + +@mcp.tool() +async def chat_completion( + messages: List[Dict[str, str]], + model: str = "claude-3-5-sonnet-20241022", + max_tokens: int = 4096, + temperature: float = 0.7, + system: Optional[str] = None, + stop_sequences: Optional[List[str]] = None +) -> Dict[str, Any]: + """ + Generate a chat completion using Claude + + Args: + messages: List of message dicts with 'role' and 'content' + model: Claude model to use (claude-3-5-sonnet-20241022, claude-3-5-haiku-20241022) + max_tokens: Maximum tokens to generate + temperature: Sampling temperature (0-1) + system: System prompt + stop_sequences: List of sequences that will stop generation + + Returns: + Generated response with usage information + """ + try: + # Convert messages to Anthropic format + anthropic_messages = [] + for msg in messages: + anthropic_messages.append({ + "role": msg["role"], + "content": msg["content"] + }) + + kwargs = { + "model": model, + "messages": anthropic_messages, + "max_tokens": max_tokens, + "temperature": temperature + } + + if system: + kwargs["system"] = system + if stop_sequences: + kwargs["stop_sequences"] = stop_sequences + + response = await client.messages.create(**kwargs) + + # Extract text content + content = "" + for block in response.content: + if isinstance(block, TextBlock): + content += block.text + + return { + "success": True, + "content": content, + "model": response.model, + "usage": { + "input_tokens": response.usage.input_tokens, + "output_tokens": response.usage.output_tokens, + "total_tokens": response.usage.input_tokens + response.usage.output_tokens + }, + "stop_reason": response.stop_reason + } + except Exception as e: + logger.error(f"Failed to generate chat completion: {e}") + raise ValueError(f"Failed to generate chat completion: {str(e)}") + +@mcp.tool() +async def streaming_chat( + messages: 
List[Dict[str, str]], + model: str = "claude-3-5-sonnet-20241022", + max_tokens: int = 4096, + temperature: float = 0.7, + system: Optional[str] = None +) -> AsyncIterator[Dict[str, Any]]: + """ + Generate a streaming chat completion using Claude + + Args: + messages: List of message dicts with 'role' and 'content' + model: Claude model to use + max_tokens: Maximum tokens to generate + temperature: Sampling temperature (0-1) + system: System prompt + + Yields: + Streaming response chunks + """ + try: + anthropic_messages = [] + for msg in messages: + anthropic_messages.append({ + "role": msg["role"], + "content": msg["content"] + }) + + kwargs = { + "model": model, + "messages": anthropic_messages, + "max_tokens": max_tokens, + "temperature": temperature, + "stream": True + } + + if system: + kwargs["system"] = system + + async with client.messages.stream(**kwargs) as stream: + async for event in stream: + if hasattr(event, 'type'): + yield { + "type": event.type, + "data": event.model_dump() if hasattr(event, 'model_dump') else str(event) + } + except Exception as e: + logger.error(f"Failed to stream chat completion: {e}") + yield {"error": str(e)} + +# Token Management + +@mcp.tool() +async def count_tokens( + text: str, + model: str = "claude-3-5-sonnet-20241022" +) -> Dict[str, Any]: + """ + Count tokens in text for a specific Claude model + + Args: + text: Text to count tokens for + model: Claude model to use for tokenization + + Returns: + Token count information + """ + try: + # Use the client's token counting method + token_count = await client.count_tokens(text) + + return { + "success": True, + "token_count": token_count, + "model": model, + "text_length": len(text) + } + except Exception as e: + # Fallback to estimation if direct counting fails + # Claude's tokenizer roughly follows: ~4 chars per token + estimated_tokens = len(text) // 4 + + return { + "success": True, + "token_count": estimated_tokens, + "model": model, + "text_length": len(text), + "is_estimate": True + } + +# Vision and Multimodal + +@mcp.tool() +async def analyze_image( + image_base64: str, + prompt: str, + model: str = "claude-3-5-sonnet-20241022", + max_tokens: int = 1024 +) -> Dict[str, Any]: + """ + Analyze an image using Claude's vision capabilities + + Args: + image_base64: Base64 encoded image + prompt: Question or instruction about the image + model: Claude model to use (must support vision) + max_tokens: Maximum tokens to generate + + Returns: + Analysis results + """ + try: + # Detect image format from base64 header if present + if image_base64.startswith('data:image/'): + media_type = image_base64.split(';')[0].split(':')[1] + image_base64 = image_base64.split(',')[1] + else: + # Default to JPEG if not specified + media_type = "image/jpeg" + + response = await client.messages.create( + model=model, + max_tokens=max_tokens, + messages=[{ + "role": "user", + "content": [ + { + "type": "image", + "source": { + "type": "base64", + "media_type": media_type, + "data": image_base64 + } + }, + { + "type": "text", + "text": prompt + } + ] + }] + ) + + content = "" + for block in response.content: + if isinstance(block, TextBlock): + content += block.text + + return { + "success": True, + "analysis": content, + "model": response.model, + "usage": { + "input_tokens": response.usage.input_tokens, + "output_tokens": response.usage.output_tokens + } + } + except Exception as e: + logger.error(f"Failed to analyze image: {e}") + raise ValueError(f"Failed to analyze image: {str(e)}") + +# Computer Use (Beta) + 
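+# Illustrative sketch (not part of the original patch): analyze_image above
+# and computer_use below each parse data URLs inline; that logic could be
+# shared. The helper name and default media type here are assumptions.
+def _split_data_url(image_base64: str, default_media_type: str = "image/jpeg"):
+    """Return (media_type, raw_base64) for a bare base64 string or a data URL
+    like 'data:image/png;base64,<data>'."""
+    if image_base64.startswith('data:image/'):
+        header, data = image_base64.split(',', 1)
+        # 'data:image/png;base64' -> 'image/png'
+        return header.split(';')[0].split(':')[1], data
+    return default_media_type, image_base64
+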
+@mcp.tool() +async def computer_use( + instruction: str, + screenshot_base64: Optional[str] = None, + model: str = "claude-3-5-sonnet-20241022", + max_tokens: int = 4096 +) -> Dict[str, Any]: + """ + Use Claude's computer use capability to interact with desktop applications + + Args: + instruction: What action to perform + screenshot_base64: Optional screenshot of current screen state + model: Claude model to use (must support computer use) + max_tokens: Maximum tokens to generate + + Returns: + Computer use action instructions + """ + try: + messages = [] + content = [{"type": "text", "text": instruction}] + + if screenshot_base64: + # Add screenshot for context + if screenshot_base64.startswith('data:image/'): + media_type = screenshot_base64.split(';')[0].split(':')[1] + screenshot_base64 = screenshot_base64.split(',')[1] + else: + media_type = "image/png" + + content.insert(0, { + "type": "image", + "source": { + "type": "base64", + "media_type": media_type, + "data": screenshot_base64 + } + }) + + messages.append({ + "role": "user", + "content": content + }) + + # Enable computer use tools + response = await client.messages.create( + model=model, + max_tokens=max_tokens, + messages=messages, + tools=[ + { + "type": "computer_20241022", + "name": "computer", + "description": "Control computer interactions", + "display_width_px": 1920, + "display_height_px": 1080 + } + ], + tool_choice={"type": "auto"} + ) + + # Extract actions and text + actions = [] + text_content = "" + + for block in response.content: + if isinstance(block, TextBlock): + text_content += block.text + elif isinstance(block, ToolUseBlock): + actions.append({ + "tool": block.name, + "input": block.input + }) + + return { + "success": True, + "actions": actions, + "explanation": text_content, + "model": response.model, + "usage": { + "input_tokens": response.usage.input_tokens, + "output_tokens": response.usage.output_tokens + } + } + except Exception as e: + logger.error(f"Failed to use computer capability: {e}") + raise ValueError(f"Failed to use computer capability: {str(e)}") + +# Function Calling + +@mcp.tool() +async def function_calling( + messages: List[Dict[str, str]], + tools: List[Dict[str, Any]], + model: str = "claude-3-5-sonnet-20241022", + max_tokens: int = 4096, + tool_choice: Optional[Dict[str, str]] = None +) -> Dict[str, Any]: + """ + Use Claude's function calling capability for structured outputs + + Args: + messages: Conversation messages + tools: List of tool definitions with name, description, and parameters + model: Claude model to use + max_tokens: Maximum tokens to generate + tool_choice: Force specific tool use (auto, any, or specific tool) + + Returns: + Function call results and explanations + """ + try: + anthropic_messages = [] + for msg in messages: + anthropic_messages.append({ + "role": msg["role"], + "content": msg["content"] + }) + + kwargs = { + "model": model, + "messages": anthropic_messages, + "max_tokens": max_tokens, + "tools": tools + } + + if tool_choice: + kwargs["tool_choice"] = tool_choice + else: + kwargs["tool_choice"] = {"type": "auto"} + + response = await client.messages.create(**kwargs) + + # Extract function calls and text + function_calls = [] + text_content = "" + + for block in response.content: + if isinstance(block, TextBlock): + text_content += block.text + elif isinstance(block, ToolUseBlock): + function_calls.append({ + "id": block.id, + "name": block.name, + "arguments": block.input + }) + + return { + "success": True, + "function_calls": function_calls, + 
"content": text_content, + "model": response.model, + "usage": { + "input_tokens": response.usage.input_tokens, + "output_tokens": response.usage.output_tokens + } + } + except Exception as e: + logger.error(f"Failed to perform function calling: {e}") + raise ValueError(f"Failed to perform function calling: {str(e)}") + +# Batch Processing + +@mcp.tool() +async def batch_messages( + batch_requests: List[Dict[str, Any]], + model: str = "claude-3-5-sonnet-20241022" +) -> Dict[str, Any]: + """ + Process multiple message requests in batch for efficiency + + Args: + batch_requests: List of request dicts, each with messages and optional params + model: Default model to use for all requests + + Returns: + Batch processing results + """ + try: + results = [] + total_input_tokens = 0 + total_output_tokens = 0 + + for i, request in enumerate(batch_requests): + try: + # Extract parameters for this request + messages = request.get("messages", []) + req_model = request.get("model", model) + max_tokens = request.get("max_tokens", 1024) + temperature = request.get("temperature", 0.7) + system = request.get("system") + + # Make the request + kwargs = { + "model": req_model, + "messages": messages, + "max_tokens": max_tokens, + "temperature": temperature + } + + if system: + kwargs["system"] = system + + response = await client.messages.create(**kwargs) + + # Extract content + content = "" + for block in response.content: + if isinstance(block, TextBlock): + content += block.text + + results.append({ + "index": i, + "success": True, + "content": content, + "usage": { + "input_tokens": response.usage.input_tokens, + "output_tokens": response.usage.output_tokens + } + }) + + total_input_tokens += response.usage.input_tokens + total_output_tokens += response.usage.output_tokens + + except Exception as e: + results.append({ + "index": i, + "success": False, + "error": str(e) + }) + + return { + "success": True, + "results": results, + "total_usage": { + "input_tokens": total_input_tokens, + "output_tokens": total_output_tokens, + "total_tokens": total_input_tokens + total_output_tokens + }, + "batch_size": len(batch_requests) + } + except Exception as e: + logger.error(f"Failed to process batch: {e}") + raise ValueError(f"Failed to process batch: {str(e)}") + +# Context Management + +@mcp.tool() +async def create_cached_context( + context: str, + context_id: str, + model: str = "claude-3-5-sonnet-20241022" +) -> Dict[str, Any]: + """ + Create a cached context for efficient reuse in conversations + + Args: + context: The context to cache (system prompts, docs, etc) + context_id: Unique ID for this context + model: Model to optimize caching for + + Returns: + Caching confirmation and token information + """ + try: + # Note: Anthropic doesn't have direct context caching API yet + # This is a placeholder for future functionality + # For now, we'll return token count and store guidance + + token_count = len(context) // 4 # Rough estimation + + return { + "success": True, + "context_id": context_id, + "token_count": token_count, + "model": model, + "recommendation": "Store this context client-side and prepend to messages for efficiency", + "cache_strategy": { + "method": "client_side", + "max_size_tokens": 100000, + "ttl_minutes": 60 + } + } + except Exception as e: + logger.error(f"Failed to create cached context: {e}") + raise ValueError(f"Failed to create cached context: {str(e)}") + +if __name__ == "__main__": + # Get port from environment or use default + port = int(os.getenv('ANTHROPIC_COMPREHENSIVE_MCP_PORT', 
'8026')) + + logger.info(f"Starting Anthropic Comprehensive MCP Server on port {port}") + + # Run with streamable-http transport + mcp.run(transport="streamable-http", host="0.0.0.0", port=port, path="/") \ No newline at end of file diff --git a/servers/github-http-mcp/src/github_server.py b/servers/github-http-mcp/src/github_server.py index 96c976f..c2feafd 100644 --- a/servers/github-http-mcp/src/github_server.py +++ b/servers/github-http-mcp/src/github_server.py @@ -241,7 +241,7 @@ async def get_file_contents( repo_obj = github_client._get_repo(owner, repo) try: - contents = repo_obj.get_contents(path, ref=branch) + contents = repo_obj.get_contents(path, ref=branch if branch else None) if isinstance(contents, list): # Directory @@ -262,7 +262,17 @@ async def get_file_contents( } else: # File - content = base64.b64decode(contents.content).decode('utf-8') + try: + # Try UTF-8 first + content = base64.b64decode(contents.content).decode('utf-8') + except UnicodeDecodeError: + # If that fails, try latin-1 which accepts all bytes + try: + content = base64.b64decode(contents.content).decode('latin-1') + except: + # If all else fails, return raw base64 + content = contents.content + return { "success": True, "type": "file", @@ -681,20 +691,25 @@ async def update_issue_comment( """ try: repo_obj = github_client._get_repo(owner, repo) - comment = repo_obj.get_issue_comment(comment_id) - - # Update comment - comment.edit(body) + # PyGithub doesn't have get_issue_comment on Repository + # We need to use the GitHub API directly or get all comments + # Using direct API call + comment_url = f"{repo_obj.url}/issues/comments/{comment_id}" + headers, data = repo_obj._requester.requestJsonAndCheck( + "PATCH", + comment_url, + input={"body": body} + ) return { "success": True, "comment": { - "id": comment.id, - "body": comment.body, - "html_url": comment.html_url, - "user": comment.user.login, - "created_at": comment.created_at.isoformat(), - "updated_at": comment.updated_at.isoformat() + "id": data['id'], + "body": data['body'], + "html_url": data['html_url'], + "user": data['user']['login'], + "created_at": data['created_at'], + "updated_at": data['updated_at'] } } except GithubException as e: @@ -897,14 +912,20 @@ async def list_pull_requests( try: repo_obj = github_client._get_repo(owner, repo) - # Get pull requests - pulls = repo_obj.get_pulls( - state=state, - sort=sort, - direction=direction, - base=base, - head=head - ) + # Get pull requests with proper None handling + kwargs = {} + if state is not None: + kwargs['state'] = state + if sort is not None: + kwargs['sort'] = sort + if direction is not None: + kwargs['direction'] = direction + if base is not None: + kwargs['base'] = base + if head is not None: + kwargs['head'] = head + + pulls = repo_obj.get_pulls(**kwargs) # Paginate per_page = min(per_page or 30, 100) @@ -2009,8 +2030,11 @@ async def list_commits( try: repo_obj = github_client._get_repo(owner, repo) - # Get commits - commits = repo_obj.get_commits(sha=sha) + # Get commits with proper None handling + kwargs = {} + if sha is not None: + kwargs['sha'] = sha + commits = repo_obj.get_commits(**kwargs) # Paginate per_page = min(per_page or 30, 100) diff --git a/servers/openai-tools-http-mcp/src/openai_tools_server.py b/servers/openai-tools-http-mcp/src/openai_tools_server.py new file mode 100644 index 0000000..6c18751 --- /dev/null +++ b/servers/openai-tools-http-mcp/src/openai_tools_server.py @@ -0,0 +1,406 @@ +#!/usr/bin/env python3 +""" +OpenAI Tools MCP Server - HTTP Version +Provides 
essential OpenAI API tools for embeddings, images, audio, and moderation +""" + +import os +import logging +from typing import List, Dict, Any, Optional +import base64 +from PIL import Image +import io + +from fastmcp import FastMCP +from openai import AsyncOpenAI + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Initialize MCP server +mcp = FastMCP("openai-tools") + +# Initialize OpenAI client +api_key = os.getenv('OPENAI_API_KEY') +if not api_key: + raise ValueError("OPENAI_API_KEY environment variable is required") + +client = AsyncOpenAI(api_key=api_key) + +# Embedding Operations + +@mcp.tool() +async def create_embeddings( + texts: List[str], + model: str = "text-embedding-3-small" +) -> Dict[str, Any]: + """ + Create embeddings for a list of texts using OpenAI's embedding models + + Args: + texts: List of texts to embed (max 2048 texts) + model: Model to use (text-embedding-3-small or text-embedding-3-large) + + Returns: + Embeddings and usage information + """ + try: + if len(texts) > 2048: + raise ValueError("Maximum 2048 texts allowed per request") + + response = await client.embeddings.create( + input=texts, + model=model + ) + + embeddings = [e.embedding for e in response.data] + + return { + "success": True, + "embeddings": embeddings, + "model": model, + "usage": { + "prompt_tokens": response.usage.prompt_tokens, + "total_tokens": response.usage.total_tokens + } + } + except Exception as e: + logger.error(f"Failed to create embeddings: {e}") + raise ValueError(f"Failed to create embeddings: {str(e)}") + +@mcp.tool() +async def calculate_similarity( + embedding1: List[float], + embedding2: List[float] +) -> Dict[str, Any]: + """ + Calculate cosine similarity between two embeddings + + Args: + embedding1: First embedding vector + embedding2: Second embedding vector + + Returns: + Cosine similarity score between -1 and 1 + """ + try: + import numpy as np + + # Convert to numpy arrays + vec1 = np.array(embedding1) + vec2 = np.array(embedding2) + + # Calculate cosine similarity + dot_product = np.dot(vec1, vec2) + norm1 = np.linalg.norm(vec1) + norm2 = np.linalg.norm(vec2) + + similarity = dot_product / (norm1 * norm2) + + return { + "success": True, + "similarity": float(similarity), + "percentage": float((similarity + 1) / 2 * 100) # Convert to 0-100% + } + except Exception as e: + logger.error(f"Failed to calculate similarity: {e}") + raise ValueError(f"Failed to calculate similarity: {str(e)}") + +# Image Operations + +@mcp.tool() +async def generate_image( + prompt: str, + model: str = "dall-e-3", + size: str = "1024x1024", + quality: str = "standard", + n: int = 1 +) -> Dict[str, Any]: + """ + Generate images using DALL-E + + Args: + prompt: Text description of the image to generate + model: Model to use (dall-e-2 or dall-e-3) + size: Image size (1024x1024, 1792x1024, or 1024x1792 for dall-e-3) + quality: Image quality (standard or hd for dall-e-3) + n: Number of images to generate (1-10 for dall-e-2, only 1 for dall-e-3) + + Returns: + Generated image URLs + """ + try: + response = await client.images.generate( + model=model, + prompt=prompt, + size=size, + quality=quality, + n=n + ) + + images = [] + for image in response.data: + images.append({ + "url": image.url, + "revised_prompt": getattr(image, 'revised_prompt', None) + }) + + return { + "success": True, + "images": images, + "model": model, + "size": size + } + except Exception as e: + logger.error(f"Failed to generate image: {e}") + raise ValueError(f"Failed to 
generate image: {str(e)}") + +@mcp.tool() +async def edit_image( + image_base64: str, + prompt: str, + mask_base64: Optional[str] = None, + model: str = "dall-e-2", + size: str = "1024x1024", + n: int = 1 +) -> Dict[str, Any]: + """ + Edit an image using DALL-E 2 + + Args: + image_base64: Base64 encoded PNG image to edit (must be square, <4MB) + prompt: Description of how to edit the image + mask_base64: Optional base64 encoded PNG mask indicating areas to edit + model: Model to use (only dall-e-2 supports edits) + size: Output size (256x256, 512x512, or 1024x1024) + n: Number of variations to generate (1-10) + + Returns: + Edited image URLs + """ + try: + # Decode base64 images + image_data = base64.b64decode(image_base64) + mask_data = base64.b64decode(mask_base64) if mask_base64 else None + + # Create image files + image_file = io.BytesIO(image_data) + mask_file = io.BytesIO(mask_data) if mask_data else None + + response = await client.images.edit( + image=image_file, + prompt=prompt, + mask=mask_file, + model=model, + size=size, + n=n + ) + + images = [{"url": image.url} for image in response.data] + + return { + "success": True, + "images": images, + "model": model, + "size": size + } + except Exception as e: + logger.error(f"Failed to edit image: {e}") + raise ValueError(f"Failed to edit image: {str(e)}") + +# Audio Operations + +@mcp.tool() +async def text_to_speech( + text: str, + voice: str = "alloy", + model: str = "tts-1", + response_format: str = "mp3", + speed: float = 1.0 +) -> Dict[str, Any]: + """ + Convert text to speech using OpenAI TTS + + Args: + text: Text to convert to speech (max 4096 chars) + voice: Voice to use (alloy, echo, fable, onyx, nova, shimmer) + model: Model to use (tts-1 or tts-1-hd) + response_format: Audio format (mp3, opus, aac, flac, wav, pcm) + speed: Speed of speech (0.25 to 4.0) + + Returns: + Base64 encoded audio data + """ + try: + if len(text) > 4096: + raise ValueError("Text must be less than 4096 characters") + + response = await client.audio.speech.create( + model=model, + voice=voice, + input=text, + response_format=response_format, + speed=speed + ) + + # Get audio data as bytes + audio_data = response.content + + # Encode to base64 + audio_base64 = base64.b64encode(audio_data).decode('utf-8') + + return { + "success": True, + "audio_base64": audio_base64, + "format": response_format, + "model": model, + "voice": voice + } + except Exception as e: + logger.error(f"Failed to convert text to speech: {e}") + raise ValueError(f"Failed to convert text to speech: {str(e)}") + +@mcp.tool() +async def speech_to_text( + audio_base64: str, + model: str = "whisper-1", + language: Optional[str] = None, + response_format: str = "json", + temperature: float = 0 +) -> Dict[str, Any]: + """ + Transcribe audio to text using Whisper + + Args: + audio_base64: Base64 encoded audio file (mp3, mp4, mpeg, mpga, m4a, wav, webm) + model: Model to use (whisper-1) + language: Language of the audio (ISO-639-1 format) + response_format: Output format (json, text, srt, verbose_json, vtt) + temperature: Sampling temperature (0-1) + + Returns: + Transcribed text + """ + try: + # Decode base64 audio + audio_data = base64.b64decode(audio_base64) + audio_file = io.BytesIO(audio_data) + audio_file.name = "audio.mp3" # Required for API + + response = await client.audio.transcriptions.create( + model=model, + file=audio_file, + language=language, + response_format=response_format, + temperature=temperature + ) + + return { + "success": True, + "text": response.text if 
hasattr(response, 'text') else response, + "model": model, + "language": language or "auto-detected" + } + except Exception as e: + logger.error(f"Failed to transcribe audio: {e}") + raise ValueError(f"Failed to transcribe audio: {str(e)}") + +# Moderation + +@mcp.tool() +async def moderate_content( + text: str, + model: str = "text-moderation-latest" +) -> Dict[str, Any]: + """ + Check if text complies with OpenAI usage policies + + Args: + text: Text to moderate + model: Model to use (text-moderation-latest or text-moderation-stable) + + Returns: + Moderation results with category scores and flags + """ + try: + response = await client.moderations.create( + input=text, + model=model + ) + + result = response.results[0] + + return { + "success": True, + "flagged": result.flagged, + "categories": result.categories.model_dump(), + "category_scores": result.category_scores.model_dump(), + "model": model + } + except Exception as e: + logger.error(f"Failed to moderate content: {e}") + raise ValueError(f"Failed to moderate content: {str(e)}") + +# Utility Operations + +@mcp.tool() +async def extract_structured_data( + text: str, + schema: Dict[str, Any], + model: str = "gpt-4-turbo-preview" +) -> Dict[str, Any]: + """ + Extract structured data from text using GPT models + + Args: + text: Text to extract data from + schema: JSON schema describing the expected output structure + model: GPT model to use + + Returns: + Extracted structured data matching the schema + """ + try: + prompt = f"""Extract structured data from the following text according to the provided JSON schema. + +Schema: {schema} + +Text: {text} + +Return only valid JSON that matches the schema.""" + + response = await client.chat.completions.create( + model=model, + messages=[ + {"role": "system", "content": "You are a data extraction assistant. Extract structured data and return only valid JSON."}, + {"role": "user", "content": prompt} + ], + response_format={"type": "json_object"} + ) + + import json + extracted_data = json.loads(response.choices[0].message.content) + + return { + "success": True, + "data": extracted_data, + "model": model, + "usage": { + "prompt_tokens": response.usage.prompt_tokens, + "completion_tokens": response.usage.completion_tokens, + "total_tokens": response.usage.total_tokens + } + } + except Exception as e: + logger.error(f"Failed to extract structured data: {e}") + raise ValueError(f"Failed to extract structured data: {str(e)}") + +if __name__ == "__main__": + # Get port from environment or use default + port = int(os.getenv('OPENAI_TOOLS_MCP_PORT', '8012')) + + logger.info(f"Starting OpenAI Tools MCP Server on port {port}") + + # Run with streamable-http transport + mcp.run(transport="streamable-http", host="0.0.0.0", port=port, path="/") \ No newline at end of file diff --git a/servers/routing-http-mcp/src/routing_server.py b/servers/routing-http-mcp/src/routing_server.py new file mode 100644 index 0000000..80484e3 --- /dev/null +++ b/servers/routing-http-mcp/src/routing_server.py @@ -0,0 +1,932 @@ +#!/usr/bin/env python3 +""" +MCP Routing Server for Multi-Agent Orchestration + +This server provides tools for coordinating multiple AI agents, +managing sessions, and routing tasks based on agent capabilities. 
+""" + +import os +import json +import asyncio +import logging +from datetime import datetime +from typing import Dict, List, Optional, Any +from fastmcp import FastMCP +from pydantic import BaseModel, Field +import aiohttp +import redis.asyncio as redis +from uuid import uuid4 + +# Configure logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Initialize FastMCP server +mcp = FastMCP("routing-mcp") + +# Redis client for session state +redis_client = None + +# Supabase configuration +SUPABASE_URL = os.getenv("SUPABASE_URL", "") +SUPABASE_KEY = os.getenv("SUPABASE_ANON_KEY", "") + +# Agent registry (could be loaded from Supabase) +AGENT_REGISTRY = { + "claude": { + "capabilities": ["general", "coding", "architecture", "documentation"], + "max_concurrent_tasks": 3 + }, + "openai": { + "capabilities": ["general", "coding", "analysis"], + "max_concurrent_tasks": 3 + }, + "ui-specialist": { + "capabilities": ["frontend", "ui", "react", "css"], + "max_concurrent_tasks": 2 + }, + "backend-specialist": { + "capabilities": ["backend", "api", "database", "security"], + "max_concurrent_tasks": 2 + }, + "test-specialist": { + "capabilities": ["testing", "qa", "automation"], + "max_concurrent_tasks": 2 + } +} + +# Session tracking +active_sessions: Dict[str, Any] = {} + +# In-memory storage as fallback when Redis is not available +memory_storage: Dict[str, Any] = {} + +class RedisOrMemoryClient: + """Wrapper that uses Redis if available, otherwise falls back to memory""" + + def __init__(self, redis_client=None): + self.redis = redis_client + self.memory = memory_storage + + async def setex(self, key: str, seconds: int, value: str): + if self.redis: + return await self.redis.setex(key, seconds, value) + else: + self.memory[key] = value + return True + + async def get(self, key: str): + if self.redis: + return await self.redis.get(key) + else: + return self.memory.get(key) + + async def hset(self, key: str, field: str, value: str): + if self.redis: + return await self.redis.hset(key, field, value) + else: + if key not in self.memory: + self.memory[key] = {} + self.memory[key][field] = value + return True + + async def delete(self, key: str): + if self.redis: + return await self.redis.delete(key) + else: + if key in self.memory: + del self.memory[key] + return True + +async def init_redis(): + """Initialize Redis connection (optional)""" + global redis_client + try: + redis_url = os.getenv("REDIS_URL", "redis://localhost:6379") + real_redis_client = await redis.from_url(redis_url) + # Test connection + await real_redis_client.ping() + logger.info("Redis connection established") + redis_client = RedisOrMemoryClient(real_redis_client) + except Exception as e: + logger.warning(f"Redis connection failed: {e}") + logger.warning("Running without Redis - session state will not persist") + redis_client = RedisOrMemoryClient(None) + +async def get_supabase_headers(): + """Get headers for Supabase API calls""" + return { + "apikey": SUPABASE_KEY, + "Authorization": f"Bearer {SUPABASE_KEY}", + "Content-Type": "application/json" + } + +# Session Management Tools + +@mcp.tool() +async def session_start( + module_name: str = Field(..., description="Name of the module/feature being worked on"), + agent_type: str = Field(..., description="Type of agent starting the session"), + project_id: str = Field(..., description="Project ID from Supabase"), + context: Dict[str, Any] = Field(default_factory=dict, description="Additional context for the session") +) -> Dict[str, Any]: + 
"""Start a new development session with context tracking""" + try: + session_id = str(uuid4()) + + session_data = { + "id": session_id, + "module_name": module_name, + "agent_type": agent_type, + "context": context, + "project_id": project_id, + "started_at": datetime.utcnow().isoformat(), + "status": "active", + "checkpoints": [], + "handoffs": [] + } + + # Store in memory + active_sessions[session_id] = session_data + + # Store in Redis for persistence + if redis_client: + await redis_client.setex( + f"session:{session_id}", + 3600 * 24, # 24 hour TTL + json.dumps(session_data) + ) + + # Record in Supabase + if SUPABASE_URL and SUPABASE_KEY: + async with aiohttp.ClientSession() as session: + url = f"{SUPABASE_URL}/rest/v1/agent_sessions" + headers = await get_supabase_headers() + + payload = { + "session_id": session_id, + "module_name": module_name, + "agent_type": agent_type, + "context": context, + "project_id": project_id, + "status": "active" + } + + async with session.post(url, json=payload, headers=headers) as resp: + if resp.status != 201: + logger.warning(f"Failed to record session in Supabase: {await resp.text()}") + + return { + "success": True, + "session_id": session_id, + "message": f"Started session for {module_name} with {agent_type}", + "session": session_data + } + + except Exception as e: + logger.error(f"Failed to start session: {str(e)}") + return { + "success": False, + "error": str(e) + } + +@mcp.tool() +async def session_checkpoint( + session_id: str = Field(..., description="Session ID to checkpoint"), + checkpoint_name: str = Field(..., description="Name of the checkpoint"), + state: Dict[str, Any] = Field(..., description="Current state to save"), + notes: str = Field("", description="Notes about the checkpoint") +) -> Dict[str, Any]: + """Create a checkpoint in the current session for recovery""" + try: + if session_id not in active_sessions: + # Try to load from Redis + if redis_client: + session_data = await redis_client.get(f"session:{session_id}") + if session_data: + active_sessions[session_id] = json.loads(session_data) + else: + return {"success": False, "error": "Session not found"} + else: + return {"success": False, "error": "Session not found"} + + checkpoint = { + "name": checkpoint_name, + "timestamp": datetime.utcnow().isoformat(), + "state": state, + "notes": notes + } + + active_sessions[session_id]["checkpoints"].append(checkpoint) + + # Update Redis + if redis_client: + await redis_client.setex( + f"session:{session_id}", + 3600 * 24, + json.dumps(active_sessions[session_id]) + ) + + return { + "success": True, + "message": f"Checkpoint '{checkpoint_name}' created", + "checkpoint": checkpoint + } + + except Exception as e: + logger.error(f"Failed to create checkpoint: {str(e)}") + return { + "success": False, + "error": str(e) + } + +@mcp.tool() +async def session_handoff( + session_id: str = Field(..., description="Session ID to hand off"), + target_agent: str = Field(..., description="Target agent to hand off to"), + handoff_notes: str = Field(..., description="Notes for the receiving agent"), + checkpoint_before_handoff: bool = Field(True, description="Create checkpoint before handoff") +) -> Dict[str, Any]: + """Prepare session for handoff to another agent""" + try: + if session_id not in active_sessions: + return {"success": False, "error": "Session not found"} + + session = active_sessions[session_id] + + # Create checkpoint if requested + if checkpoint_before_handoff: + await session_checkpoint( + session_id=session_id, + 
checkpoint_name=f"handoff_to_{target_agent}", + state=session.get("context", {}), + notes=f"Pre-handoff checkpoint to {target_agent}" + ) + + handoff = { + "from_agent": session["agent_type"], + "to_agent": target_agent, + "timestamp": datetime.utcnow().isoformat(), + "notes": handoff_notes, + "session_duration": ( + datetime.utcnow() - datetime.fromisoformat(session["started_at"]) + ).total_seconds() + } + + session["handoffs"].append(handoff) + session["agent_type"] = target_agent + session["status"] = "handed_off" + + # Update Redis + if redis_client: + await redis_client.setex( + f"session:{session_id}", + 3600 * 24, + json.dumps(session) + ) + + # Create handoff record in Supabase + if SUPABASE_URL and SUPABASE_KEY: + async with aiohttp.ClientSession() as http_session: + url = f"{SUPABASE_URL}/rest/v1/agent_handoffs" + headers = await get_supabase_headers() + + payload = { + "session_id": session_id, + "from_agent": handoff["from_agent"], + "to_agent": target_agent, + "notes": handoff_notes, + "project_id": session.get("project_id") + } + + await http_session.post(url, json=payload, headers=headers) + + return { + "success": True, + "message": f"Session handed off to {target_agent}", + "handoff": handoff, + "session_id": session_id + } + + except Exception as e: + logger.error(f"Failed to handoff session: {str(e)}") + return { + "success": False, + "error": str(e) + } + +@mcp.tool() +async def session_continue( + session_id: str = Field(..., description="Session ID to continue") +) -> Dict[str, Any]: + """Continue a previously started or handed-off session""" + try: + if session_id not in active_sessions: + # Try to load from Redis + if redis_client: + session_data = await redis_client.get(f"session:{session_id}") + if session_data: + active_sessions[session_id] = json.loads(session_data) + else: + return {"success": False, "error": "Session not found"} + else: + return {"success": False, "error": "Session not found"} + + session = active_sessions[session_id] + session["status"] = "active" + session["continued_at"] = datetime.utcnow().isoformat() + + # Get latest checkpoint + latest_checkpoint = None + if session["checkpoints"]: + latest_checkpoint = session["checkpoints"][-1] + + return { + "success": True, + "message": f"Continuing session for {session['module_name']}", + "session": session, + "latest_checkpoint": latest_checkpoint + } + + except Exception as e: + logger.error(f"Failed to continue session: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Workflow Orchestration Tools + +@mcp.tool() +async def workflow_trigger( + workflow_name: str = Field(..., description="Name of the workflow to trigger"), + parameters: Dict[str, Any] = Field(default_factory=dict, description="Workflow parameters"), + session_id: Optional[str] = Field(None, description="Associated session ID") +) -> Dict[str, Any]: + """Trigger a predefined workflow""" + try: + workflow_id = str(uuid4()) + + # Define workflow templates + workflows = { + "feature_implementation": [ + {"step": "requirements_analysis", "agent": "claude"}, + {"step": "backend_implementation", "agent": "backend-specialist"}, + {"step": "frontend_implementation", "agent": "ui-specialist"}, + {"step": "testing", "agent": "test-specialist"}, + {"step": "documentation", "agent": "claude"} + ], + "bug_fix": [ + {"step": "bug_analysis", "agent": "claude"}, + {"step": "fix_implementation", "agent": "backend-specialist"}, + {"step": "testing", "agent": "test-specialist"} + ], + "code_review": [ + {"step": "automated_checks", 
"agent": "claude"}, + {"step": "security_review", "agent": "backend-specialist"}, + {"step": "ui_review", "agent": "ui-specialist"} + ] + } + + if workflow_name not in workflows: + return { + "success": False, + "error": f"Unknown workflow: {workflow_name}", + "available_workflows": list(workflows.keys()) + } + + workflow_instance = { + "id": workflow_id, + "name": workflow_name, + "parameters": parameters, + "session_id": session_id, + "steps": workflows[workflow_name], + "current_step": 0, + "status": "running", + "started_at": datetime.utcnow().isoformat(), + "results": [] + } + + # Store workflow state + if redis_client: + await redis_client.setex( + f"workflow:{workflow_id}", + 3600 * 24, + json.dumps(workflow_instance) + ) + + return { + "success": True, + "workflow_id": workflow_id, + "message": f"Started workflow '{workflow_name}'", + "workflow": workflow_instance + } + + except Exception as e: + logger.error(f"Failed to trigger workflow: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Task Routing Tools + +@mcp.tool() +async def route_task_to_agent( + task_id: str = Field(..., description="Task ID to route"), + task_type: str = Field(..., description="Type of task (e.g., 'frontend', 'backend', 'testing')"), + agent_selector: str = Field("auto", description="Selection mode: 'auto' or 'manual'"), + agent_id: Optional[str] = Field(None, description="Specific agent ID for manual selection"), + complexity: str = Field("medium", description="Task complexity: low, medium, high") +) -> Dict[str, Any]: + """Route a task to the most appropriate agent""" + try: + if agent_selector == "manual" and agent_id: + selected_agent = agent_id + else: + # Auto-select based on task type and agent capabilities + candidates = [] + for agent, info in AGENT_REGISTRY.items(): + if task_type in info["capabilities"]: + candidates.append(agent) + + if not candidates: + # Fallback to general agents + candidates = ["claude", "openai"] + + # Simple load balancing (in production, check actual workload) + selected_agent = candidates[0] + + routing_decision = { + "task_id": task_id, + "task_type": task_type, + "assigned_agent": selected_agent, + "complexity": complexity, + "routing_timestamp": datetime.utcnow().isoformat(), + "routing_reason": f"Best match for {task_type} tasks" + } + + # Record routing decision + if SUPABASE_URL and SUPABASE_KEY: + async with aiohttp.ClientSession() as session: + url = f"{SUPABASE_URL}/rest/v1/task_routing" + headers = await get_supabase_headers() + + await session.post(url, json=routing_decision, headers=headers) + + return { + "success": True, + "routing_decision": routing_decision, + "message": f"Task {task_id} routed to {selected_agent}" + } + + except Exception as e: + logger.error(f"Failed to route task: {str(e)}") + return { + "success": False, + "error": str(e) + } + +@mcp.tool() +async def get_agent_workload( + agent_type: Optional[str] = Field(None, description="Specific agent type to check") +) -> Dict[str, Any]: + """Check agent availability and current workload""" + try: + workload_info = {} + + # In production, this would query actual agent status + # For now, return mock data based on agent registry + agents_to_check = [agent_type] if agent_type else list(AGENT_REGISTRY.keys()) + + for agent in agents_to_check: + if agent in AGENT_REGISTRY: + workload_info[agent] = { + "capabilities": AGENT_REGISTRY[agent]["capabilities"], + "max_concurrent_tasks": AGENT_REGISTRY[agent]["max_concurrent_tasks"], + "current_tasks": 0, # Would query actual data + 
"availability": "available", + "estimated_wait_time": 0 + } + + return { + "success": True, + "workload": workload_info, + "timestamp": datetime.utcnow().isoformat() + } + + except Exception as e: + logger.error(f"Failed to get agent workload: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Continuation Management Tools + +@mcp.tool() +async def continuation_create( + session_id: str = Field(..., description="Session ID to create continuation for"), + title: str = Field(..., description="Title for the continuation"), + description: str = Field(..., description="Description of work completed and next steps"), + files_modified: List[str] = Field(default_factory=list, description="List of files modified"), + dependencies: List[str] = Field(default_factory=list, description="Dependencies or blockers"), + next_agent_type: Optional[str] = Field(None, description="Suggested agent type for next phase") +) -> Dict[str, Any]: + """Create a session continuation for handoff or resumption""" + try: + if session_id not in active_sessions: + return {"success": False, "error": "Session not found"} + + continuation_id = str(uuid4()) + session = active_sessions[session_id] + + continuation = { + "id": continuation_id, + "session_id": session_id, + "title": title, + "description": description, + "files_modified": files_modified, + "dependencies": dependencies, + "next_agent_type": next_agent_type, + "created_at": datetime.utcnow().isoformat(), + "created_by": session["agent_type"], + "module_name": session["module_name"], + "project_id": session.get("project_id") + } + + # Store in Redis + if redis_client: + await redis_client.setex( + f"continuation:{continuation_id}", + 3600 * 24 * 7, # 7 day TTL + json.dumps(continuation) + ) + + # Store in Supabase + if SUPABASE_URL and SUPABASE_KEY: + async with aiohttp.ClientSession() as http_session: + url = f"{SUPABASE_URL}/rest/v1/session_continuations" + headers = await get_supabase_headers() + + await http_session.post(url, json=continuation, headers=headers) + + return { + "success": True, + "continuation_id": continuation_id, + "message": f"Created continuation '{title}'", + "continuation": continuation + } + + except Exception as e: + logger.error(f"Failed to create continuation: {str(e)}") + return { + "success": False, + "error": str(e) + } + +@mcp.tool() +async def continuation_list( + session_id: Optional[str] = Field(None, description="Filter by session ID"), + project_id: Optional[str] = Field(None, description="Filter by project ID"), + limit: int = Field(10, description="Maximum number of continuations to return") +) -> Dict[str, Any]: + """List session continuations with optional filtering""" + try: + continuations = [] + + if SUPABASE_URL and SUPABASE_KEY: + async with aiohttp.ClientSession() as session: + url = f"{SUPABASE_URL}/rest/v1/session_continuations" + headers = await get_supabase_headers() + + # Build query + params = {"limit": limit} + if session_id: + params["session_id"] = f"eq.{session_id}" + if project_id: + params["project_id"] = f"eq.{project_id}" + + async with session.get(url, params=params, headers=headers) as resp: + if resp.status == 200: + continuations = await resp.json() + + return { + "success": True, + "continuations": continuations, + "count": len(continuations) + } + + except Exception as e: + logger.error(f"Failed to list continuations: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Branch Management Tools + +@mcp.tool() +async def branch_create( + branch_name: str = Field(..., 
description="Name for the new branch"), + base_branch: str = Field("main", description="Base branch to create from"), + session_id: Optional[str] = Field(None, description="Associated session ID"), + auto_push: bool = Field(True, description="Automatically push to remote") +) -> Dict[str, Any]: + """Create and manage a new development branch""" + try: + # This would integrate with GitHub MCP for actual branch operations + branch_info = { + "name": branch_name, + "base": base_branch, + "session_id": session_id, + "created_at": datetime.utcnow().isoformat(), + "status": "created" + } + + # Store branch metadata + if redis_client and session_id: + await redis_client.hset( + f"session:{session_id}", + "branch_name", + branch_name + ) + + return { + "success": True, + "message": f"Created branch '{branch_name}' from '{base_branch}'", + "branch": branch_info + } + + except Exception as e: + logger.error(f"Failed to create branch: {str(e)}") + return { + "success": False, + "error": str(e) + } + +@mcp.tool() +async def branch_pr_create( + branch_name: str = Field(..., description="Branch to create PR from"), + title: str = Field(..., description="PR title"), + body: str = Field(..., description="PR description"), + session_id: Optional[str] = Field(None, description="Associated session ID"), + auto_link_issues: bool = Field(True, description="Automatically link related issues") +) -> Dict[str, Any]: + """Create a pull request with automatic linking and context""" + try: + # Gather session context if available + context = {} + if session_id and session_id in active_sessions: + session = active_sessions[session_id] + context = { + "module": session["module_name"], + "started_at": session["started_at"], + "checkpoints": len(session.get("checkpoints", [])), + "handoffs": len(session.get("handoffs", [])) + } + + pr_info = { + "branch": branch_name, + "title": title, + "body": body, + "context": context, + "created_at": datetime.utcnow().isoformat() + } + + # This would integrate with GitHub MCP for actual PR creation + + return { + "success": True, + "message": f"Created PR '{title}' from branch '{branch_name}'", + "pr": pr_info + } + + except Exception as e: + logger.error(f"Failed to create PR: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Sprint Management Tools + +@mcp.tool() +async def sprint_create( + name: str = Field(..., description="Sprint name"), + start_date: str = Field(..., description="Sprint start date (ISO format)"), + end_date: str = Field(..., description="Sprint end date (ISO format)"), + project_id: str = Field(..., description="Project ID from Supabase"), + goals: List[str] = Field(default_factory=list, description="Sprint goals") +) -> Dict[str, Any]: + """Create a new sprint with goals and timeline""" + try: + sprint_id = str(uuid4()) + + sprint = { + "id": sprint_id, + "name": name, + "start_date": start_date, + "end_date": end_date, + "project_id": project_id, + "goals": goals, + "status": "planning", + "created_at": datetime.utcnow().isoformat() + } + + # Store in Supabase + if SUPABASE_URL and SUPABASE_KEY: + async with aiohttp.ClientSession() as session: + url = f"{SUPABASE_URL}/rest/v1/sprints" + headers = await get_supabase_headers() + + await session.post(url, json=sprint, headers=headers) + + return { + "success": True, + "sprint_id": sprint_id, + "message": f"Created sprint '{name}'", + "sprint": sprint + } + + except Exception as e: + logger.error(f"Failed to create sprint: {str(e)}") + return { + "success": False, + "error": str(e) + } + 
+@mcp.tool() +async def sprint_assign_task( + task_id: str = Field(..., description="Task ID to assign"), + sprint_id: str = Field(..., description="Sprint ID to assign to"), + priority: str = Field("medium", description="Task priority in sprint") +) -> Dict[str, Any]: + """Assign a task to a sprint with priority""" + try: + assignment = { + "task_id": task_id, + "sprint_id": sprint_id, + "priority": priority, + "assigned_at": datetime.utcnow().isoformat() + } + + # Update task in Supabase + if SUPABASE_URL and SUPABASE_KEY: + async with aiohttp.ClientSession() as session: + url = f"{SUPABASE_URL}/rest/v1/tasks" + headers = await get_supabase_headers() + + update_data = { + "sprint_id": sprint_id, + "sprint_priority": priority + } + + await session.patch( + f"{url}?id=eq.{task_id}", + json=update_data, + headers=headers + ) + + return { + "success": True, + "message": f"Assigned task {task_id} to sprint {sprint_id}", + "assignment": assignment + } + + except Exception as e: + logger.error(f"Failed to assign task to sprint: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Documentation Workflow Tools + +@mcp.tool() +async def docs_workflow_start( + project_id: str = Field(..., description="Project ID to document"), + watch_paths: List[str] = Field(default_factory=list, description="Paths to watch for changes"), + auto_generate: bool = Field(True, description="Automatically generate docs on changes") +) -> Dict[str, Any]: + """Start automated documentation workflow""" + try: + workflow_id = str(uuid4()) + + docs_workflow = { + "id": workflow_id, + "project_id": project_id, + "watch_paths": watch_paths, + "auto_generate": auto_generate, + "status": "running", + "started_at": datetime.utcnow().isoformat() + } + + # Store workflow state + if redis_client: + await redis_client.setex( + f"docs_workflow:{workflow_id}", + 3600 * 24, + json.dumps(docs_workflow) + ) + + return { + "success": True, + "workflow_id": workflow_id, + "message": "Started documentation workflow", + "workflow": docs_workflow + } + + except Exception as e: + logger.error(f"Failed to start docs workflow: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Coordination Tools + +@mcp.tool() +async def coordinate_multi_agent_task( + task_description: str = Field(..., description="Description of the complex task"), + required_capabilities: List[str] = Field(..., description="List of required capabilities"), + project_id: str = Field(..., description="Project ID from Supabase") +) -> Dict[str, Any]: + """Coordinate a complex task requiring multiple agents""" + try: + coordination_id = str(uuid4()) + + # Analyze required capabilities and create sub-tasks + sub_tasks = [] + for capability in required_capabilities: + # Find agents with this capability + capable_agents = [ + agent for agent, info in AGENT_REGISTRY.items() + if capability in info["capabilities"] + ] + + if capable_agents: + sub_task = { + "id": str(uuid4()), + "capability": capability, + "assigned_agents": capable_agents, + "status": "pending" + } + sub_tasks.append(sub_task) + + coordination_plan = { + "id": coordination_id, + "task_description": task_description, + "required_capabilities": required_capabilities, + "sub_tasks": sub_tasks, + "project_id": project_id, + "status": "planned", + "created_at": datetime.utcnow().isoformat() + } + + # Store coordination plan + if redis_client: + await redis_client.setex( + f"coordination:{coordination_id}", + 3600 * 24, + json.dumps(coordination_plan) + ) + + return { + "success": True, + 
"coordination_id": coordination_id, + "plan": coordination_plan, + "message": f"Created coordination plan with {len(sub_tasks)} sub-tasks" + } + + except Exception as e: + logger.error(f"Failed to coordinate multi-agent task: {str(e)}") + return { + "success": False, + "error": str(e) + } + +# Initialize server +def main(): + """Initialize and run the MCP server""" + # Initialize Redis in a separate event loop + loop = asyncio.new_event_loop() + loop.run_until_complete(init_redis()) + loop.close() + + # Get port from environment or use default + port = int(os.getenv('ROUTING_MCP_PORT', '8026')) + + logger.info(f"Starting Routing MCP Server on port {port}") + + # Run the FastMCP server + mcp.run( + transport="streamable-http", + host="0.0.0.0", + port=port, + path="/" + ) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/servers/start_all_http_servers.sh b/servers/start_all_http_servers.sh new file mode 100755 index 0000000..95b7fe8 --- /dev/null +++ b/servers/start_all_http_servers.sh @@ -0,0 +1,112 @@ +#!/bin/bash + +# Start all MCP HTTP servers for DevLoop/Synergy +# This script starts all 18 HTTP MCP servers + +set -e + +# Colors for output +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Auto-detect project root (works with devloop3, Synergy, or DevLoopAI) +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$(dirname "$(dirname "$SCRIPT_DIR")")" +PROJECT_NAME="$(basename "$PROJECT_ROOT")" + +echo "🔍 Detected project: $PROJECT_NAME at $PROJECT_ROOT" + +# Set paths relative to project root +MCP_SERVERS_DIR="$SCRIPT_DIR" +CONFIG_DIR="$PROJECT_ROOT/configs" +VENV_PATH="$PROJECT_ROOT/.venv" + +# Check if venv exists +if [ ! -d "$VENV_PATH" ]; then + echo -e "${RED}Error: Virtual environment not found at $VENV_PATH${NC}" + echo "Please run: python3 -m venv $VENV_PATH" + exit 1 +fi + +# Use project's Python from venv +PYTHON_BIN="$VENV_PATH/bin/python" + +# Load environment variables +if [[ -f "$CONFIG_DIR/api-keys.env" ]]; then + source "$CONFIG_DIR/api-keys.env" +else + echo -e "${RED}Warning: api-keys.env not found at $CONFIG_DIR${NC}" +fi + +# Function to start a server +start_server() { + local server_name=$1 + local server_dir=$2 + local port=$3 + local main_file=$4 + + echo -e "${YELLOW}Starting ${server_name} on port ${port}...${NC}" + + cd "$server_dir" + + # Kill any existing process on the port + lsof -ti:$port | xargs kill -9 2>/dev/null || true + + # Start the server using project's Python + if [[ -f "$main_file" ]]; then + nohup $PYTHON_BIN "$main_file" > server.log 2>&1 & + sleep 2 + + # Check if server started + if lsof -i:$port > /dev/null 2>&1; then + echo -e "${GREEN}✓ ${server_name} started successfully${NC}" + else + echo -e "${RED}✗ ${server_name} failed to start${NC}" + fi + else + echo -e "${RED}✗ ${server_name} main file not found: $main_file${NC}" + fi +} + +echo "🚀 Starting all MCP HTTP servers for $PROJECT_NAME..." 
+echo "================================================" + +# Phase 1 - Core Services +echo -e "\n${YELLOW}Phase 1: Core Services${NC}" +start_server "Brave Search" "$MCP_SERVERS_DIR/brave-search-http-mcp" 8003 "src/brave_search_server.py" +start_server "Filesystem" "$MCP_SERVERS_DIR/filesystem-http-mcp" 8006 "src/filesystem_server.py" +start_server "Memory" "$MCP_SERVERS_DIR/memory-http-mcp" 8007 "src/memory_server.py" + +# Phase 2 - AI/Dev Services +echo -e "\n${YELLOW}Phase 2: AI/Development Services${NC}" +start_server "Vercel v0" "$MCP_SERVERS_DIR/vercel-v0-mcp" 8010 "src/vercel_v0_server.py" +start_server "GitHub" "$MCP_SERVERS_DIR/github-http-mcp" 8011 "src/github_server.py" +start_server "OpenAI Tools" "$MCP_SERVERS_DIR/openai-tools-http-mcp" 8012 "src/openai_tools_server.py" +start_server "Supabase" "$MCP_SERVERS_DIR/supabase-http-mcp" 8013 "src/supabase_server.py" +start_server "Gemini" "$MCP_SERVERS_DIR/gemini-http-mcp" 8014 "src/gemini_server.py" +start_server "Anthropic Comprehensive" "$MCP_SERVERS_DIR/anthropic-comprehensive-http-mcp" 8015 "src/anthropic_comprehensive_server.py" +start_server "Sequential Thinking" "$MCP_SERVERS_DIR/sequential-thinking-http-mcp" 8016 "src/sequential_thinking_server.py" + +# Phase 3 - External Integrations +echo -e "\n${YELLOW}Phase 3: External Integrations${NC}" +start_server "Slack" "$MCP_SERVERS_DIR/slack-http-mcp" 8017 "src/slack_server.py" +start_server "Redis" "$MCP_SERVERS_DIR/redis-http-mcp" 8018 "src/redis_server.py" +start_server "Context7" "$MCP_SERVERS_DIR/context7-http-mcp" 8019 "src/context7_server.py" + +# Phase 4 - Automation/Infrastructure +echo -e "\n${YELLOW}Phase 4: Automation & Infrastructure${NC}" +start_server "Docker" "$MCP_SERVERS_DIR/docker-http-mcp" 8020 "docker_server.py" +start_server "Everything" "$MCP_SERVERS_DIR/everything-http-mcp" 8021 "src/everything_server.py" +start_server "Fetch" "$MCP_SERVERS_DIR/fetch-http-mcp" 8022 "src/fetch_server.py" +start_server "Browserbase" "$MCP_SERVERS_DIR/browserbase-http-mcp" 8023 "browserbase_server.py" +start_server "Hostinger" "$MCP_SERVERS_DIR/hostinger-http-mcp" 8024 "src/hostinger_server.py" +start_server "Vercel Deploy" "$MCP_SERVERS_DIR/vercel-deploy-http-mcp" 8025 "src/vercel_deploy_server.py" + +echo -e "\n${GREEN}================================================${NC}" +echo -e "${GREEN}All MCP servers startup complete for $PROJECT_NAME!${NC}" +echo -e "\nTo check server status:" +echo -e " netstat -tlnp 2>/dev/null | grep -E '80[0-9]{2}' | grep python | sort" +echo -e "\nTo view logs:" +echo -e " tail -f $MCP_SERVERS_DIR/*/server.log" \ No newline at end of file diff --git a/servers/supabase-http-mcp/src/supabase_server.py b/servers/supabase-http-mcp/src/supabase_server.py index 8a8e0bd..2f58b3b 100644 --- a/servers/supabase-http-mcp/src/supabase_server.py +++ b/servers/supabase-http-mcp/src/supabase_server.py @@ -13,6 +13,22 @@ from datetime import datetime import asyncio +# Parameter validation helper +def validate_and_convert_params(**kwargs): + """Convert string parameters to proper types for MCP compatibility""" + converted = {} + for key, value in kwargs.items(): + if isinstance(value, str): + # Try to parse JSON strings + if value.startswith('[') or value.startswith('{'): + try: + converted[key] = json.loads(value) + continue + except json.JSONDecodeError: + pass + converted[key] = value + return converted + # Supabase client from supabase import create_client, Client import asyncpg @@ -221,16 +237,20 @@ async def execute_sql( Query results and metadata """ 
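+    # Note: this relies on a `sql` (or similarly named) RPC function existing
+    # in the database; a minimal sketch of such a helper (an assumption, not
+    # part of this patch) would be:
+    #   create or replace function sql(query text) returns json
+    #   language plpgsql security definer
+    #   as $$ begin return (select json_agg(t) from ... t); end $$;
+    # Without it, the postgrest.rpc('sql', ...) call below will error.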
     try:
-        # Use the Supabase client to execute SQL
-        response = supabase_client.supabase.rpc('exec_sql', {
-            'query': query
-        })
+        # Execute SQL directly using postgrest
+        response = supabase_client.supabase.postgrest.rpc('sql', {'query': query})
+        result = response.execute()
+
+        # Ensure proper JSON serialization
+        results = result.data if hasattr(result, 'data') else []
+        if not isinstance(results, (list, dict)):
+            results = str(results)
 
         return {
             "success": True,
             "project_id": project_id,
             "query": query[:100] + "..." if len(query) > 100 else query,
-            "results": response.data if hasattr(response, 'data') else response,
+            "results": results,
             "executed_at": datetime.utcnow().isoformat()
         }
     except Exception as e:
@@ -270,13 +290,30 @@ async def list_tables(
         ORDER BY schemaname, tablename;
         """
 
-        response = supabase_client.supabase.rpc('exec_sql', {'query': query})
+        # Use direct table query instead of RPC
+        try:
+            # Query pg_tables directly through the information_schema
+            response = supabase_client.supabase.table('information_schema.tables').select('*').eq('table_schema', 'public').execute()
+            tables = response.data if response.data else []
+        except Exception:
+            # Fallback: use a simpler approach
+            tables = []
+            for schema in schemas:
+                try:
+                    # Get table names from pg_tables; escape single quotes since
+                    # the schema name is interpolated into raw SQL
+                    safe_schema = schema.replace("'", "''")
+                    result = supabase_client.supabase.postgrest.rpc('sql', {
+                        'query': f"SELECT tablename as table_name FROM pg_tables WHERE schemaname = '{safe_schema}'"
+                    }).execute()
+                    if hasattr(result, 'data') and result.data:
+                        tables.extend(result.data)
+                except Exception as e:
+                    logger.warning(f"Failed to get tables for schema {schema}: {e}")
 
         return {
             "success": True,
             "project_id": project_id,
             "schemas": schemas,
-            "tables": response.data if hasattr(response, 'data') else response
+            "tables": tables
         }
     except Exception as e:
         logger.error(f"Failed to list tables: {e}")
@@ -326,7 +363,7 @@ async def apply_migration(
 @mcp.tool()
 async def insert_data(
     table: str,
-    data: Union[Dict[str, Any], List[Dict[str, Any]]],
+    data: Union[Dict[str, Any], List[Dict[str, Any]], str],
     project_id: Optional[str] = None
 ) -> Dict[str, Any]:
     """
@@ -334,13 +371,20 @@ async def insert_data(
     Args:
         table: Table name
-        data: Data to insert (single object or array)
+        data: Data to insert (single object or array, or JSON string)
         project_id: Optional project ID for logging
 
     Returns:
         Inserted data with any generated fields
     """
     try:
+        # Handle parameter validation - convert JSON string to object/array
+        if isinstance(data, str):
+            try:
+                data = json.loads(data)
+            except json.JSONDecodeError:
+                raise ValueError(f"Invalid JSON string provided for data parameter: {data}")
+
         response = supabase_client.supabase.table(table).insert(data).execute()
 
         return {
@@ -751,28 +795,36 @@ async def generate_typescript_types(project_id: str) -> Dict[str, Any]:
         ORDER BY table_name, ordinal_position;
         """
 
-        response = supabase_client.supabase.rpc('exec_sql', {'query': query})
-        columns = response.data if hasattr(response, 'data') else response
+        try:
+            # Use information_schema directly
+            response = supabase_client.supabase.table('information_schema.columns').select('table_name,column_name,data_type,is_nullable').eq('table_schema', 'public').execute()
+            columns = response.data if response.data else []
+        except Exception:
+            # Fallback to empty response
+            columns = []
 
         # Generate basic TypeScript interfaces
         tables = {}
-        for col in columns:
-            table = col['table_name']
-            if table not in tables:
-                tables[table] = []
-
-            ts_type = {
-                'varchar': 'string',
-                'text': 'string',
-                'integer': 'number',
-                'bigint':
'number', - 'boolean': 'boolean', - 'timestamp': 'string', - 'uuid': 'string' - }.get(col['data_type'], 'any') - - nullable = '?' if col['is_nullable'] == 'YES' else '' - tables[table].append(f" {col['column_name']}{nullable}: {ts_type};") + if isinstance(columns, list): + for col in columns: + if isinstance(col, dict): + table = col.get('table_name', 'unknown') + if table not in tables: + tables[table] = [] + + ts_type = { + 'varchar': 'string', + 'text': 'string', + 'integer': 'number', + 'bigint': 'number', + 'boolean': 'boolean', + 'timestamp': 'string', + 'uuid': 'string' + }.get(col.get('data_type', 'text'), 'any') + + nullable = '?' if col.get('is_nullable') == 'YES' else '' + column_name = col.get('column_name', 'unknown') + tables[table].append(f" {column_name}{nullable}: {ts_type};") # Build TypeScript interfaces interfaces = [] From 9eb1ec60dd113cdf5b758f5a80a5c1e00e043c27 Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 28 Jun 2025 19:31:59 -0700 Subject: [PATCH 2/5] feat: Add write_to_file parameter and v0_update_file function to v0 server --- .../vercel-v0-mcp/src/vercel_v0_server.py | 163 +++++++++++++++--- 1 file changed, 135 insertions(+), 28 deletions(-) diff --git a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py index 20fa838..175d860 100644 --- a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py +++ b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py @@ -849,7 +849,9 @@ async def generate_component( ui_library: Optional[str] = None, output_format: Optional[str] = "single_file", project_path: Optional[str] = None, - auto_setup: Optional[bool] = True + auto_setup: Optional[bool] = True, + write_to_file: Optional[bool] = False, + target_directory: Optional[str] = None ) -> Dict[str, Any]: """ Generate a UI component using Vercel v0 API with intelligent dependency detection @@ -863,6 +865,8 @@ async def generate_component( output_format: 'single_file' or 'multi_file' project_path: Path to analyze for dependencies (defaults to current directory) auto_setup: Automatically create missing config files (default: True) + write_to_file: Whether to write the generated component to files (default: False) + target_directory: Where to create the component files (auto-detected if not provided) Returns: Generated component code with metadata and project analysis @@ -900,36 +904,46 @@ async def generate_component( stream=True ) - if output_format == "multi_file": - # Return structured file data - return { - "success": True, - "files": result['files'], - "generation_id": result['generation_id'], - "component_name": component_name or "Component", - "tech_stack_used": tech_stack, - "project_analysis": project_analysis, - "auto_detected": { - "framework": not framework, - "styling": not styling, - "ui_library": not ui_library + # Handle file writing if requested + if write_to_file: + file_creation_result = project_analyzer.create_component_files( + result['files'], + target_directory + ) + + if not file_creation_result["success"]: + return { + "success": False, + "error": "Component generated but file creation failed", + "file_errors": file_creation_result["errors"], + "partial_files": file_creation_result["created_files"] } + + response = { + "success": True, + "generation_id": result['generation_id'], + "component_name": component_name or "Component", + "tech_stack_used": tech_stack, + "project_analysis": project_analysis, + "auto_detected": { + "framework": not framework, + "styling": not styling, + 
"ui_library": not ui_library } + } + + if write_to_file: + response["files_created"] = file_creation_result["created_files"] + response["target_directory"] = file_creation_result["target_directory"] + + if output_format == "multi_file": + # Return structured file data + response["files"] = result['files'] else: # Return as single file - return { - "success": True, - "code": result['content'], - "generation_id": result['generation_id'], - "component_name": component_name or "Component", - "tech_stack_used": tech_stack, - "project_analysis": project_analysis, - "auto_detected": { - "framework": not framework, - "styling": not styling, - "ui_library": not ui_library - } - } + response["code"] = result['content'] + + return response except Exception as e: error_msg = f"Failed to generate component: {str(e)}" @@ -1281,7 +1295,9 @@ async def generate_ui_from_data( async def improve_component( existing_code: str, improvements: str, - maintain_structure: Optional[bool] = True + maintain_structure: Optional[bool] = True, + write_to_file: Optional[bool] = False, + file_path: Optional[str] = None ) -> Dict[str, Any]: """ Improve an existing component using v0 @@ -1290,6 +1306,8 @@ async def improve_component( existing_code: The current component code improvements: Description of improvements needed maintain_structure: Whether to keep the same structure + write_to_file: Whether to write the improved code back to file + file_path: Path to the file to update (required if write_to_file is True) Returns: Improved component code @@ -1317,6 +1335,43 @@ async def improve_component( stream=True ) + # Write to file if requested + if write_to_file: + if not file_path: + return { + "success": False, + "error": "file_path is required when write_to_file is True" + } + + try: + file_full_path = Path.cwd() / file_path + file_full_path.parent.mkdir(parents=True, exist_ok=True) + + # Extract code from result + code = result['content'] + if result['files']: + # Use the first file's content if multiple files + code = result['files'][0]['content'] + + # Write improved code to file + with open(file_full_path, 'w', encoding='utf-8', newline='\n') as f: + f.write(code.replace('\r\n', '\n').replace('\r', '\n')) + + return { + "success": True, + "code": code, + "improvements_applied": improvements, + "generation_id": result['generation_id'], + "file_updated": str(file_path) + } + except Exception as write_error: + return { + "success": False, + "error": f"Component improved but file update failed: {str(write_error)}", + "code": result['content'], + "generation_id": result['generation_id'] + } + return { "success": True, "code": result['content'], @@ -1331,6 +1386,58 @@ async def improve_component( "success": False, "error": error_msg } + +@mcp.tool() +async def v0_update_file( + file_path: str, + improvements: str, + maintain_structure: Optional[bool] = True, + project_path: Optional[str] = None +) -> Dict[str, Any]: + """ + Update an existing file in-place using v0 to improve it + + Args: + file_path: Path to the file to update (relative to project_path) + improvements: Description of improvements needed + maintain_structure: Whether to keep the same structure + project_path: Path to the project (defaults to current directory) + + Returns: + Update result with improved code + """ + try: + # Read the existing file + base_path = Path(project_path) if project_path else Path.cwd() + full_path = base_path / file_path + + if not full_path.exists(): + return { + "success": False, + "error": f"File not found: {file_path}" + } + + 
with open(full_path, 'r', encoding='utf-8') as f: + existing_code = f.read() + + # Improve the component + result = await improve_component( + existing_code=existing_code, + improvements=improvements, + maintain_structure=maintain_structure, + write_to_file=True, + file_path=file_path + ) + + return result + + except Exception as e: + error_msg = f"Failed to update file: {str(e)}" + logger.error(error_msg) + return { + "success": False, + "error": error_msg + } @mcp.tool() async def convert_design_to_code( From 70941f736bcf19796d47b3c285f6c03328f44aad Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 28 Jun 2025 19:33:29 -0700 Subject: [PATCH 3/5] feat: Add v0_create_feature function for complete feature generation --- .../vercel-v0-mcp/src/vercel_v0_server.py | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py index 175d860..ccf25bd 100644 --- a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py +++ b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py @@ -1492,6 +1492,146 @@ async def convert_design_to_code( "success": False, "error": error_msg } + +@mcp.tool() +async def v0_create_feature( + feature_description: str, + components_needed: List[str], + api_endpoints: Optional[List[str]] = None, + project_path: Optional[str] = None, + create_files: Optional[bool] = True +) -> Dict[str, Any]: + """ + Create a complete feature with multiple components and optional API routes + + Args: + feature_description: Description of the feature to create + components_needed: List of component names/descriptions needed + api_endpoints: Optional list of API endpoints to create + project_path: Path to the project (defaults to current directory) + create_files: Whether to create the files (default: True) + + Returns: + Complete feature implementation with all components and APIs + """ + try: + # Initialize project analyzer + project_analyzer = ProjectAnalyzer(project_path) if project_path else analyzer + project_analysis = project_analyzer.analyze_project() + + # Determine tech stack + tech_stack = { + "framework": project_analysis["framework"] or "Next.js 14 App Router", + "language": "TypeScript" if project_analysis["typescript"] else "JavaScript", + "styling": project_analysis["styling"][0] if project_analysis["styling"] else "Tailwind CSS", + "ui_library": project_analysis["ui_libraries"][0] if project_analysis["ui_libraries"] else "shadcn/ui", + "state": "React hooks" + } + + generated_files = [] + + # Generate each component + for component_desc in components_needed: + component_prompt = f"""Create a component for this feature: {feature_description} + +Component needed: {component_desc} + +Context of other components in this feature: +{', '.join(components_needed)} + +Requirements: +- Make sure components can work together +- Use consistent prop types and interfaces +- Export necessary types for other components to use +- Include proper error handling +- Add loading states where appropriate""" + + try: + result = await generator.generate_component( + prompt=component_prompt, + tech_stack=tech_stack, + stream=True + ) + + # Add generated files to list + generated_files.extend(result['files']) + + except Exception as comp_error: + logger.error(f"Failed to generate component {component_desc}: {comp_error}") + return { + "success": False, + "error": f"Failed to generate component {component_desc}: {str(comp_error)}", + "partial_files": generated_files + } + + # 
Generate API endpoints if requested
+        if api_endpoints:
+            for endpoint_desc in api_endpoints:
+                api_prompt = f"""Create a Next.js 14 API route for this feature: {feature_description}
+
+API endpoint needed: {endpoint_desc}
+
+Components using this API:
+{', '.join(components_needed)}
+
+Requirements:
+- Use App Router route handlers (route.ts)
+- Include proper TypeScript types
+- Add input validation
+- Handle errors gracefully
+- Return appropriate status codes
+- Include CORS headers if needed
+- Add rate limiting comments where appropriate"""
+
+                try:
+                    api_result = await generator.generate_component(
+                        prompt=api_prompt,
+                        stream=True
+                    )
+
+                    # Extract endpoint name from description
+                    endpoint_name = endpoint_desc.lower().replace(' ', '-')
+
+                    # Add API route file
+                    generated_files.append({
+                        "name": "route.ts",
+                        "content": api_result['content'],
+                        "path": f"app/api/{endpoint_name}/route.ts"
+                    })
+
+                except Exception as api_error:
+                    logger.error(f"Failed to generate API {endpoint_desc}: {api_error}")
+
+        # Create files if requested; track the result so the return below can
+        # reference it safely even when nothing was generated
+        file_creation_result = None
+        if create_files and generated_files:
+            file_creation_result = project_analyzer.create_component_files(generated_files)
+
+            if not file_creation_result["success"]:
+                return {
+                    "success": False,
+                    "error": "Feature generated but file creation failed",
+                    "file_errors": file_creation_result["errors"],
+                    "partial_files": file_creation_result["created_files"],
+                    "generated_files": generated_files
+                }
+
+        return {
+            "success": True,
+            "feature_description": feature_description,
+            "components_generated": len([f for f in generated_files if 'component' in f.get('path', '').lower()]),
+            "apis_generated": len([f for f in generated_files if 'api' in f.get('path', '').lower()]),
+            "files": generated_files,
+            "tech_stack_used": tech_stack,
+            "files_created": file_creation_result["created_files"] if file_creation_result else None
+        }
+
+    except Exception as e:
+        error_msg = f"Failed to create feature: {str(e)}"
+        logger.error(error_msg)
+        return {
+            "success": False,
+            "error": error_msg
+        }
 
 if __name__ == "__main__":
     # Run as HTTP server on specified port

From 88088c4cb30ac6b53cfdef2b76b03e1a47a7c099 Mon Sep 17 00:00:00 2001
From: Your Name
Date: Sat, 28 Jun 2025 19:34:27 -0700
Subject: [PATCH 4/5] docs: Add summary of v0 server improvements

---
 V0_SERVER_IMPROVEMENTS_SUMMARY.md | 84 +++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)
 create mode 100644 V0_SERVER_IMPROVEMENTS_SUMMARY.md

diff --git a/V0_SERVER_IMPROVEMENTS_SUMMARY.md b/V0_SERVER_IMPROVEMENTS_SUMMARY.md
new file mode 100644
index 0000000..2dfdefc
--- /dev/null
+++ b/V0_SERVER_IMPROVEMENTS_SUMMARY.md
@@ -0,0 +1,84 @@
+# V0 Server Major Improvements Summary
+
+## Branch: feat/v0-server-major-improvements
+
+### Overview
+This branch contains significant improvements to the Vercel v0 MCP server to address code extraction issues and add new functionality for better file management and feature generation.
+
+## Key Improvements Made
+
+### 1. Fixed Code Extraction Issues
+- **Problem**: The v0 server was saving entire AI responses, including markdown artifacts and explanatory text
+- **Solution**: Updated the `_parse_generated_code` method with improved regex patterns to:
+  - Properly extract code from markdown code blocks
+  - Handle file markers like ```` ```tsx file="path/to/file.ext" ````
+  - Remove language identifiers and closing backticks
+  - Support multiple files in a single response
+
+### 2. Enhanced Multi-File Support
+- **Problem**: Generated files were all placed in the same directory regardless of specified paths
+- **Solution**: Updated the `create_component_files` method to:
+  - Use the `path` field from the file_info dict when provided
+  - Create proper directory structures based on file paths
+  - Handle paths that start with `/` or `./` correctly
+
+### 3. Added write_to_file Parameter
+- **Enhancement**: Added a `write_to_file` parameter to the `generate_component` tool
+- **Benefits**:
+  - Allows direct file creation during component generation
+  - Reduces the need for separate file creation steps
+  - Includes a `target_directory` parameter for flexible file placement
+
+### 4. Implemented v0_update_file Function
+- **New Tool**: `v0_update_file` - updates existing files in-place using v0
+- **Features**:
+  - Reads existing file content
+  - Improves it based on provided requirements
+  - Writes the improved code back to the file
+  - Maintains file structure and API compatibility
+
+### 5. Enhanced improve_component Function
+- **Improvements**:
+  - Added a `write_to_file` parameter to directly update files
+  - Added a `file_path` parameter for specifying which file to update
+  - Better error handling for file write operations
+
+### 6. Added v0_create_feature Function
+- **New Tool**: `v0_create_feature` - creates complete features with multiple components
+- **Capabilities**:
+  - Generates multiple related components for a feature
+  - Creates optional API endpoints
+  - Ensures components work together with consistent interfaces
+  - Handles file creation for all generated components
+
+## Code Quality Improvements
+
+### Error Handling
+- Added try/except blocks around file operations
+- Proper error messages when file creation fails
+- Partial success reporting when some files are created
+
+### Logging
+- Added detailed logging for debugging code extraction
+- Log file creation operations with sizes
+- Log raw v0 responses for troubleshooting
+
+### File Management
+- Consistent UTF-8 encoding with Unix line endings
+- Proper path handling for cross-platform compatibility
+- Directory creation with parents=True for nested structures
+
+## Testing Recommendations
+
+1. Test multi-file generation with v0
+2. Verify file paths are created correctly
+3. Test the v0_update_file function on existing components
+4. Test v0_create_feature with multiple components
+5. Verify markdown artifacts are properly removed
+
+## Next Steps
+
+1. Copy all fixes to the devloop3 repository
+2. Test with real-world component generation scenarios
+3. Add more sophisticated error recovery mechanisms
+4.
Consider adding component preview functionality \ No newline at end of file From e2337e6e0401785d74f877414c24a4261589dfdc Mon Sep 17 00:00:00 2001 From: Your Name Date: Sat, 28 Jun 2025 20:05:24 -0700 Subject: [PATCH 5/5] feat: Add enhanced error handling, validation, and rate limiting to v0 server --- .../vercel-v0-mcp/src/vercel_v0_server.py | 268 ++++++++++++++---- 1 file changed, 215 insertions(+), 53 deletions(-) diff --git a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py index ccf25bd..1aa8042 100644 --- a/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py +++ b/mcp-servers/http/vercel-v0-mcp/src/vercel_v0_server.py @@ -13,6 +13,7 @@ import logging import re import subprocess +import time from typing import Dict, Any, List, Optional from datetime import datetime from pathlib import Path @@ -28,16 +29,58 @@ logger = logging.getLogger(__name__) +class RateLimiter: + """Simple rate limiter for API calls""" + def __init__(self, calls_per_minute: int = 10): + self.calls_per_minute = calls_per_minute + self.calls = [] + self.lock = asyncio.Lock() + + async def wait_if_needed(self): + """Wait if rate limit would be exceeded""" + async with self.lock: + now = time.time() + # Remove calls older than 1 minute + self.calls = [call_time for call_time in self.calls if now - call_time < 60] + + if len(self.calls) >= self.calls_per_minute: + # Calculate wait time + oldest_call = self.calls[0] + wait_time = 60 - (now - oldest_call) + 1 + logger.info(f"Rate limit reached, waiting {wait_time:.1f} seconds") + await asyncio.sleep(wait_time) + # Clean up old calls again + now = time.time() + self.calls = [call_time for call_time in self.calls if now - call_time < 60] + + # Record this call + self.calls.append(now) + + class V0ComponentGenerator: """Handles component generation using v0 API""" - def __init__(self, api_key: str): + def __init__(self, api_key: str, rate_limit: int = 10): + if not api_key: + raise ValueError("V0 API key is required") + if not api_key.startswith(('v0_', 'v1:')): + logger.warning("API key doesn't match expected v0 format (v0_* or v1:*)") + self.api_key = api_key + self.rate_limiter = RateLimiter(calls_per_minute=rate_limit) + self.retry_attempts = 3 + self.retry_delay = 5 # seconds + # v0 uses OpenAI SDK with custom base URL - self.client = AsyncOpenAI( - api_key=api_key, - base_url='https://api.v0.dev/v1' - ) + try: + self.client = AsyncOpenAI( + api_key=api_key, + base_url='https://api.v0.dev/v1', + timeout=60.0 # 60 second timeout + ) + except Exception as e: + logger.error(f"Failed to initialize v0 client: {e}") + raise ValueError(f"Failed to initialize v0 API client: {str(e)}") async def generate_component( self, @@ -47,6 +90,13 @@ async def generate_component( ) -> Dict[str, Any]: """Generate a UI component using v0 API""" + # Validate inputs + if not prompt or not prompt.strip(): + raise ValueError("Prompt cannot be empty") + + if len(prompt) > 10000: + raise ValueError("Prompt is too long (max 10000 characters)") + # Default tech stack if not provided if not tech_stack: tech_stack = { @@ -56,6 +106,22 @@ async def generate_component( "ui_library": "shadcn/ui", "state": "React hooks" } + else: + # Validate tech stack + valid_frameworks = ["Next.js 14 App Router", "Next.js 13", "React", "Remix", "Gatsby"] + valid_languages = ["TypeScript", "JavaScript"] + valid_styling = ["Tailwind CSS", "CSS Modules", "Styled Components", "Emotion", "CSS"] + valid_ui_libraries = ["shadcn/ui", "Material-UI", "Chakra 
UI", "Ant Design", "None"] + + framework = tech_stack.get("framework", "Next.js 14 App Router") + if framework not in valid_frameworks: + logger.warning(f"Unknown framework: {framework}, using default") + tech_stack["framework"] = "Next.js 14 App Router" + + language = tech_stack.get("language", "TypeScript") + if language not in valid_languages: + logger.warning(f"Unknown language: {language}, using TypeScript") + tech_stack["language"] = "TypeScript" # Build the system prompt system_prompt = """You are v0, an expert at building modern web applications. @@ -99,61 +165,111 @@ async def generate_component( raise async def _generate_streaming(self, messages: List[Dict[str, str]]) -> Dict[str, Any]: - """Generate component with streaming""" + """Generate component with streaming and retry logic""" + for attempt in range(self.retry_attempts): + try: + # Apply rate limiting + await self.rate_limiter.wait_if_needed() + + stream = await self.client.chat.completions.create( + model='v0-1.0-md', + messages=messages, + stream=True, + max_tokens=4000, + temperature=0.7 + ) + + full_content = '' + generation_id = '' + chunk_count = 0 + + async for chunk in stream: + chunk_count += 1 + if chunk.id: + generation_id = chunk.id + # Check if choices exist and have content + if hasattr(chunk, 'choices') and chunk.choices and len(chunk.choices) > 0: + delta = chunk.choices[0].delta + if hasattr(delta, 'content') and delta.content: + full_content += delta.content + + logger.info(f"Received {chunk_count} chunks, total content length: {len(full_content)}") + + if not full_content: + raise ValueError("No content received from v0 API streaming response") + + # Parse and validate the generated code + files = self._parse_generated_code(full_content) + if not files: + logger.warning("No valid code files extracted from v0 response") + + return { + "content": full_content, + "generation_id": generation_id, + "files": files, + "streaming": True + } + except asyncio.TimeoutError: + if attempt < self.retry_attempts - 1: + logger.warning(f"v0 API timeout on attempt {attempt + 1}, retrying...") + await asyncio.sleep(self.retry_delay) + continue + logger.error("v0 API request timed out after all retries") + raise ValueError("v0 API request timed out - please try again") + except Exception as e: + if attempt < self.retry_attempts - 1 and "rate" in str(e).lower(): + logger.warning(f"Rate limit error on attempt {attempt + 1}, retrying after delay...") + await asyncio.sleep(self.retry_delay * 2) # Longer delay for rate limits + continue + elif attempt < self.retry_attempts - 1: + logger.warning(f"Generation failed on attempt {attempt + 1}: {e}") + await asyncio.sleep(self.retry_delay) + continue + else: + logger.error(f"Streaming generation failed after {self.retry_attempts} attempts: {e}") + # Fallback to non-streaming + logger.info("Falling back to non-streaming generation") + return await self._generate_regular(messages) + + async def _generate_regular(self, messages: List[Dict[str, str]]) -> Dict[str, Any]: + """Generate component without streaming""" try: - stream = await self.client.chat.completions.create( + completion = await self.client.chat.completions.create( model='v0-1.0-md', messages=messages, - stream=True + stream=False, + max_tokens=4000, + temperature=0.7 ) - full_content = '' - generation_id = '' + # Check if choices exist + if not completion.choices or len(completion.choices) == 0: + raise ValueError("No choices returned from v0 API") + + if not completion.choices[0].message or not 
completion.choices[0].message.content: + raise ValueError("No content in v0 API response") - async for chunk in stream: - if chunk.id: - generation_id = chunk.id - # Check if choices exist and have content - if hasattr(chunk, 'choices') and chunk.choices and len(chunk.choices) > 0: - delta = chunk.choices[0].delta - if hasattr(delta, 'content') and delta.content: - full_content += delta.content + content = completion.choices[0].message.content - if not full_content: - raise ValueError("No content received from v0 API streaming response") + logger.info(f"Non-streaming response length: {len(content)}") + + # Parse and validate the generated code + files = self._parse_generated_code(content) + if not files: + logger.warning("No valid code files extracted from v0 response") return { - "content": full_content, - "generation_id": generation_id, - "files": self._parse_generated_code(full_content), - "streaming": True + "content": content, + "generation_id": completion.id, + "files": files, + "streaming": False } + except asyncio.TimeoutError: + logger.error("v0 API request timed out") + raise ValueError("v0 API request timed out - please try again") except Exception as e: - logger.error(f"Streaming generation failed: {e}") - # Fallback to non-streaming - logger.info("Falling back to non-streaming generation") - return await self._generate_regular(messages) - - async def _generate_regular(self, messages: List[Dict[str, str]]) -> Dict[str, Any]: - """Generate component without streaming""" - completion = await self.client.chat.completions.create( - model='v0-1.0-md', - messages=messages, - stream=False - ) - - # Check if choices exist - if not completion.choices or len(completion.choices) == 0: - raise ValueError("No choices returned from v0 API") - - content = completion.choices[0].message.content - - return { - "content": content, - "generation_id": completion.id, - "files": self._parse_generated_code(content), - "streaming": False - } + logger.error(f"Non-streaming generation failed: {e}") + raise ValueError(f"Failed to generate component: {str(e)}") def _parse_generated_code(self, code: str) -> List[Dict[str, str]]: """Parse generated code to extract multiple files if present""" @@ -506,7 +622,15 @@ def install_dependencies(self, missing_deps: List[str]) -> Dict[str, Any]: } def create_component_files(self, files: List[Dict[str, str]], target_dir: str = None) -> Dict[str, Any]: - """Create component files in the project""" + """Create component files in the project with enhanced validation""" + if not files: + return { + "success": False, + "errors": ["No files provided to create"], + "created_files": [] + } + + # Validate target directory if not target_dir: # Auto-detect best location if (self.project_path / "src" / "components").exists(): @@ -518,24 +642,62 @@ def create_component_files(self, files: List[Dict[str, str]], target_dir: str = else: # Create components directory target_dir = "components" - (self.project_path / target_dir).mkdir(exist_ok=True) + try: + (self.project_path / target_dir).mkdir(exist_ok=True) + except Exception as e: + logger.error(f"Failed to create target directory: {e}") + return { + "success": False, + "errors": [f"Failed to create directory: {str(e)}"], + "created_files": [] + } created_files = [] errors = [] - for file_info in files: + for i, file_info in enumerate(files): + if not isinstance(file_info, dict): + errors.append({ + "file": f"file_{i}", + "error": "Invalid file info format" + }) + continue + file_name = file_info.get("name", "Component.tsx") content 
= file_info.get("content", "")
+            file_path_from_info = file_info.get("path", None)
 
+            # Validate file name
+            if not file_name or not isinstance(file_name, str):
+                errors.append({
+                    "file": f"file_{i}",
+                    "error": "Invalid or missing file name"
+                })
+                continue
+
+            # Security: Prevent directory traversal
+            if ".." in file_name or ".." in str(file_path_from_info or ""):
+                errors.append({
+                    "file": file_name,
+                    "error": "Invalid file path - directory traversal detected"
+                })
+                continue
+
             # Ensure proper file extension
-            if not file_name.endswith(('.tsx', '.jsx', '.ts', '.js')):
+            if not file_name.endswith(('.tsx', '.jsx', '.ts', '.js', '.css', '.json')):
                 file_name += '.tsx'
 
             # Use the path from file_info if provided, otherwise use target_dir
             if file_path_from_info:
-                # Handle paths that might start with / or ./
-                clean_path = file_path_from_info.lstrip('./')
+                # Reject absolute paths before normalization: lstrip('./') also
+                # strips a leading '/', which would make this check dead code
+                # if it ran after the strip
+                if file_path_from_info.startswith('/'):
+                    errors.append({
+                        "file": file_name,
+                        "error": "Absolute paths not allowed"
+                    })
+                    continue
+                # Strip only a leading './' prefix
+                clean_path = file_path_from_info.removeprefix('./')
                 file_path = self.project_path / clean_path
             else:
                 file_path = self.project_path / target_dir / file_name
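+
+            # A stricter containment check could resolve the final path and
+            # verify it stays inside the project root (sketch, Python 3.9+,
+            # not part of the original patch):
+            #   resolved = file_path.resolve()
+            #   if not resolved.is_relative_to(self.project_path.resolve()):
+            #       errors.append({"file": file_name,
+            #                      "error": "Path escapes project root"})
+            #       continue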