diff --git a/next.config.ts b/next.config.ts
index e9ffa30..291190c 100644
--- a/next.config.ts
+++ b/next.config.ts
@@ -1,7 +1,31 @@
 import type { NextConfig } from "next";
 
 const nextConfig: NextConfig = {
-  /* config options here */
+  async headers() {
+    return [
+      {
+        source: '/(.*)',
+        headers: [
+          {
+            key: 'X-Content-Type-Options',
+            value: 'nosniff',
+          },
+          {
+            key: 'X-Frame-Options',
+            value: 'DENY',
+          },
+          {
+            key: 'X-XSS-Protection',
+            value: '1; mode=block',
+          },
+          {
+            key: 'Referrer-Policy',
+            value: 'strict-origin-when-cross-origin',
+          },
+        ],
+      },
+    ];
+  },
 };
 
 export default nextConfig;
diff --git a/src/app/api/architect/chat/route.ts b/src/app/api/architect/chat/route.ts
index eea2c7e..02023c5 100644
--- a/src/app/api/architect/chat/route.ts
+++ b/src/app/api/architect/chat/route.ts
@@ -1,6 +1,7 @@
 import { NextResponse } from "next/server";
 import { z } from "zod";
 import { callOpenRouter, parseJSONResponse } from "@/lib/api/openrouter";
+import { rateLimit } from "@/lib/rate-limit";
 
 // Schema for input validation
 const ChatRequestSchema = z.object({
@@ -64,6 +65,16 @@ IMPORTANT:
 
 export async function POST(req: Request) {
   try {
+    // 1. Rate Limiting Check
+    const ip = req.headers.get("x-forwarded-for") ?? "127.0.0.1";
+    const { success } = rateLimit(ip, 10, 60000); // 10 reqs per min
+    if (!success) {
+      return NextResponse.json(
+        { error: "Too many requests. Please try again later." },
+        { status: 429 }
+      );
+    }
+
     const body = await req.json();
 
     // Validate request body
diff --git a/src/app/api/architect/plan/route.ts b/src/app/api/architect/plan/route.ts
index 73fba06..3b32401 100644
--- a/src/app/api/architect/plan/route.ts
+++ b/src/app/api/architect/plan/route.ts
@@ -3,6 +3,7 @@ import { z } from "zod";
 import { callOpenRouter, parseJSONResponse } from "@/lib/api/openrouter";
 import toolsDB from "@/data/tools_database.json";
 import bestPractices from "@/data/best_practices.json";
+import { rateLimit } from "@/lib/rate-limit";
 
 // Input: The User Request + The Tool ID they selected
 const PlanRequestSchema = z.object({
@@ -34,6 +35,15 @@ const PlanResponseSchema = z.object({
 
 export async function POST(req: Request) {
   try {
+    const ip = req.headers.get("x-forwarded-for") ?? "127.0.0.1";
+    const { success } = rateLimit(ip, 5, 60000); // 5 reqs per min (heavier operation)
+    if (!success) {
+      return NextResponse.json(
+        { error: "Too many requests. Please try again later." },
+        { status: 429 }
+      );
+    }
+
     const body = await req.json();
     const { userRequest, selectedToolId } = PlanRequestSchema.parse(body);
 
diff --git a/src/app/api/architect/select/route.ts b/src/app/api/architect/select/route.ts
index 4e8bd7d..c658754 100644
--- a/src/app/api/architect/select/route.ts
+++ b/src/app/api/architect/select/route.ts
@@ -2,11 +2,21 @@ import { NextResponse } from "next/server";
 import { callOpenRouter, parseJSONResponse } from "@/lib/api/openrouter";
 import { SelectionRequestSchema } from "@/types/selection";
 import { filterCandidates } from "@/lib/selection/hard-filter";
+import { rateLimit } from "@/lib/rate-limit";
 
 const InputSchema = SelectionRequestSchema;
 
 export async function POST(req: Request) {
   try {
+    const ip = req.headers.get("x-forwarded-for") ?? "127.0.0.1";
+    const { success } = rateLimit(ip, 5, 60000); // 5 reqs per min (heavy logic)
+    if (!success) {
+      return NextResponse.json(
+        { error: "Too many requests. Please try again later." },
+        { status: 429 }
+      );
+    }
+
     // A. Parse Body
     const body = await req.json();
     const parsedBody = InputSchema.parse(body);
diff --git a/src/lib/rate-limit.ts b/src/lib/rate-limit.ts
new file mode 100644
index 0000000..39dac5d
--- /dev/null
+++ b/src/lib/rate-limit.ts
@@ -0,0 +1,39 @@
+
+const trackers = new Map();
+
+/**
+ * Basic in-memory rate limiter for Next.js API routes.
+ * Note: In a serverless environment, this state is per-lambda instance.
+ * For distributed rate limiting, use Redis or a similar external store.
+ */
+export function rateLimit(ip: string, limit: number = 10, windowMs: number = 60000) {
+  const now = Date.now();
+
+  // Prevent memory leaks / DoS on memory
+  if (trackers.size > 10000) {
+    const cleanupTime = now;
+    for (const [key, data] of trackers.entries()) {
+      if (data.expiresAt < cleanupTime) {
+        trackers.delete(key);
+      }
+    }
+    // If still too big, hard reset to fail open/safe
+    if (trackers.size > 10000) {
+      trackers.clear();
+    }
+  }
+
+  const record = trackers.get(ip);
+
+  if (!record || now > record.expiresAt) {
+    trackers.set(ip, { count: 1, expiresAt: now + windowMs });
+    return { success: true };
+  }
+
+  if (record.count >= limit) {
+    return { success: false };
+  }
+
+  record.count++;
+  return { success: true };
+}
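
For context, a minimal usage sketch of the new helper once the diff above is applied: the first few calls from a given IP inside one window succeed, and later calls are rejected until windowMs elapses. The IP address and limits below are illustrative only, not values the routes use; the Redis note simply restates the caveat already in the rate-limit.ts doc comment.

// Hypothetical sketch, not part of the diff: exercising rateLimit directly.
import { rateLimit } from "@/lib/rate-limit";

const ip = "203.0.113.7"; // RFC 5737 documentation address, purely for illustration

for (let i = 1; i <= 6; i++) {
  const { success } = rateLimit(ip, 5, 60_000); // allow 5 requests per 60s window
  console.log(`request ${i}: ${success ? "allowed" : "rejected (routes map this to 429)"}`);
}
// Expected: requests 1-5 are allowed, request 6 is rejected until the window resets.
// Because the counters live in module memory, each serverless instance keeps its own
// window; a shared store such as Redis would be needed for a truly global limit.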