diff --git a/README.md b/README.md
index f976dfa..bc2665d 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,10 @@
-# Claude Code CLI Provider
+# Claude Max API Proxy
**Use your Claude Max subscription ($200/month) with any OpenAI-compatible client — no separate API costs!**
-This provider wraps the Claude Code CLI as a subprocess and exposes an OpenAI-compatible HTTP API, allowing tools like Clawdbot, Continue.dev, or any OpenAI-compatible client to use your Claude Max subscription instead of paying per-API-call.
+This proxy wraps the Claude Code CLI as a subprocess and exposes an OpenAI-compatible HTTP API, allowing tools like OpenClaw, Continue.dev, or any OpenAI-compatible client to use your Claude Max subscription instead of paying per-API-call.
+
+> **Fork note:** This is an actively maintained fork of [atalovesyou/claude-max-api-proxy](https://github.com/atalovesyou/claude-max-api-proxy) with additional features and bug fixes. See [What's Different](#whats-different-in-this-fork) below.
## Why This Exists
@@ -10,20 +12,20 @@ This provider wraps the Claude Code CLI as a subprocess and exposes an OpenAI-co
|----------|------|------------|
| Claude API | ~$15/M input, ~$75/M output tokens | Pay per use |
| Claude Max | $200/month flat | OAuth blocked for third-party API use |
-| **This Provider** | $0 extra (uses Max subscription) | Routes through CLI |
+| **This Proxy** | $0 extra (uses Max subscription) | Routes through CLI |
-Anthropic blocks OAuth tokens from being used directly with third-party API clients. However, the Claude Code CLI *can* use OAuth tokens. This provider bridges that gap by wrapping the CLI and exposing a standard API.
+Anthropic blocks OAuth tokens from being used directly with third-party API clients. However, the Claude Code CLI *can* use OAuth tokens. This proxy bridges that gap by wrapping the CLI and exposing a standard API.
## How It Works
```
-Your App (Clawdbot, etc.)
+Your App (OpenClaw, Continue.dev, etc.)
↓
HTTP Request (OpenAI format)
↓
- Claude Code CLI Provider (this project)
+ Claude Max API Proxy (this project)
↓
- Claude Code CLI (subprocess)
+ Claude Code CLI (subprocess, prompt via stdin)
↓
OAuth Token (from Max subscription)
↓
@@ -35,12 +37,14 @@ Your App (Clawdbot, etc.)
## Features
- **OpenAI-compatible API** — Works with any client that supports OpenAI's API format
-- **Streaming support** — Real-time token streaming via Server-Sent Events
-- **Multiple models** — Claude Opus, Sonnet, and Haiku
+- **Streaming support** — Real-time token streaming via Server-Sent Events (with usage data)
+- **Multiple models** — Claude Opus 4.6, Sonnet 4.5, Opus 4, Sonnet 4, and Haiku 4
+- **System prompt support** — Passes system/developer messages via `--append-system-prompt`
- **Session management** — Maintains conversation context
- **Auto-start service** — Optional LaunchAgent for macOS
- **Zero configuration** — Uses existing Claude CLI authentication
-- **Secure by design** — Uses spawn() to prevent shell injection
+- **Secure by design** — Uses spawn() + stdin to prevent shell injection and E2BIG errors
+- **Debug logging** — Optional `DEBUG_SUBPROCESS=true` for troubleshooting
## Prerequisites
@@ -55,14 +59,17 @@ Your App (Clawdbot, etc.)
```bash
# Clone the repository
-git clone https://github.com/anthropics/claude-code-cli-provider.git
-cd claude-code-cli-provider
+git clone https://github.com/smartchainark/claude-max-api-proxy.git
+cd claude-max-api-proxy
# Install dependencies
npm install
# Build
npm run build
+
+# (Optional) Install as global command
+npm link
```
## Usage
@@ -71,6 +78,8 @@ npm run build
```bash
node dist/server/standalone.js
+# or if installed globally:
+claude-max-api
```
The server runs at `http://localhost:3456` by default.
@@ -88,7 +97,7 @@ curl http://localhost:3456/v1/models
curl -X POST http://localhost:3456/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
- "model": "claude-opus-4",
+ "model": "claude-opus-4-6",
"messages": [{"role": "user", "content": "Hello!"}]
}'
@@ -96,10 +105,22 @@ curl -X POST http://localhost:3456/v1/chat/completions \
curl -N -X POST http://localhost:3456/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
- "model": "claude-opus-4",
+ "model": "claude-opus-4-6",
"messages": [{"role": "user", "content": "Hello!"}],
"stream": true
}'
+
+# With system prompt
+curl -N -X POST http://localhost:3456/v1/chat/completions \
+ -H "Content-Type: application/json" \
+ -d '{
+ "model": "claude-sonnet-4-5",
+ "messages": [
+ {"role": "system", "content": "You are a helpful assistant."},
+ {"role": "user", "content": "Hello!"}
+ ],
+ "stream": true
+ }'
```
## API Endpoints
@@ -112,11 +133,23 @@ curl -N -X POST http://localhost:3456/v1/chat/completions \
## Available Models
-| Model ID | Maps To |
-|----------|---------|
-| `claude-opus-4` | Claude Opus 4.5 |
-| `claude-sonnet-4` | Claude Sonnet 4 |
-| `claude-haiku-4` | Claude Haiku 4 |
+| Model ID | Maps To | Notes |
+|----------|---------|-------|
+| `claude-opus-4-6` | Claude Opus 4.6 | Latest, most capable |
+| `claude-opus-4` | Claude Opus 4 | |
+| `claude-sonnet-4-5` | Claude Sonnet 4.5 | Fast and capable |
+| `claude-sonnet-4` | Claude Sonnet 4 | |
+| `claude-haiku-4` | Claude Haiku 4 | Fastest |
+
+### Model aliases
+
+The proxy also accepts prefixed model names for compatibility with different clients:
+
+| Prefix | Example | Maps To |
+|--------|---------|---------|
+| `claude-max/` | `claude-max/claude-opus-4-6` | `opus` |
+| `claude-code-cli/` | `claude-code-cli/claude-sonnet-4-5` | `sonnet` |
+| (none) | `opus-max`, `sonnet-max` | `opus`, `sonnet` |
## Configuration with Popular Tools
@@ -130,16 +163,33 @@ clawdbot models status
If you see `anthropic:claude-cli=OAuth`, you're already using your Max subscription.
-### Continue.dev
+### OpenClaw
-Add to your Continue config:
+```json
+{
+ "providers": {
+ "claude-max": {
+ "baseUrl": "http://127.0.0.1:3456/v1",
+ "apiKey": "not-needed",
+ "api": "openai-completions",
+ "models": [
+ { "id": "claude-opus-4-6", "name": "Claude Opus 4.6 (Max)" },
+ { "id": "claude-sonnet-4-5", "name": "Claude Sonnet 4.5 (Max)" },
+ { "id": "claude-haiku-4", "name": "Claude Haiku 4 (Max)" }
+ ]
+ }
+ }
+}
+```
+
+### Continue.dev
```json
{
"models": [{
- "title": "Claude (Max)",
+ "title": "Claude Opus 4.6 (Max)",
"provider": "openai",
- "model": "claude-opus-4",
+ "model": "claude-opus-4-6",
"apiBase": "http://localhost:3456/v1",
"apiKey": "not-needed"
}]
@@ -157,14 +207,53 @@ client = OpenAI(
)
response = client.chat.completions.create(
- model="claude-opus-4",
+ model="claude-opus-4-6",
messages=[{"role": "user", "content": "Hello!"}]
)
```
## Auto-Start on macOS
-Create a LaunchAgent to start the provider automatically on login. See `docs/macos-setup.md` for detailed instructions.
+Create a LaunchAgent to start the proxy automatically on login:
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN"
+  "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+  <key>Label</key>
+  <string>com.claude-max-api-proxy</string>
+  <key>ProgramArguments</key>
+  <array>
+    <string>/path/to/node</string>
+    <string>/path/to/claude-max-api-proxy/dist/server/standalone.js</string>
+  </array>
+  <key>RunAtLoad</key>
+  <true/>
+  <key>KeepAlive</key>
+  <true/>
+</dict>
+</plist>
+```
+
+## What's Different in This Fork
+
+Compared to the [original repo](https://github.com/atalovesyou/claude-max-api-proxy):
+
+| Feature | Original | This Fork |
+|---------|----------|-----------|
+| Streaming usage data | Missing | Included in final SSE chunk |
+| System prompt | Embedded in user prompt | Via `--append-system-prompt` flag |
+| Prompt delivery | CLI argument (E2BIG risk) | stdin (no size limit) |
+| Model support | Opus 4, Sonnet 4, Haiku 4 | + Opus 4.6, Sonnet 4.5 |
+| Model prefixes | `claude-code-cli/` only | + `claude-max/`, aliases |
+| Undefined model crash | Crashes on rate limit | Graceful fallback |
+| Debug logging | Always on (noisy) | Opt-in via `DEBUG_SUBPROCESS` |
+| Permissions | Requires confirmation | `--dangerously-skip-permissions` for service mode |
+| Array content | Not supported | Handles string and array content parts |
+
+Community PRs incorporated: [#7](https://github.com/atalovesyou/claude-max-api-proxy/pull/7), [#10](https://github.com/atalovesyou/claude-max-api-proxy/pull/10), [#12](https://github.com/atalovesyou/claude-max-api-proxy/pull/12).
## Architecture
@@ -172,12 +261,12 @@ Create a LaunchAgent to start the provider automatically on login. See `docs/mac
src/
├── types/
│ ├── claude-cli.ts # Claude CLI JSON output types
-│ └── openai.ts # OpenAI API types
+│ └── openai.ts # OpenAI API types (with multimodal content parts)
├── adapter/
│ ├── openai-to-cli.ts # Convert OpenAI requests → CLI format
│ └── cli-to-openai.ts # Convert CLI responses → OpenAI format
├── subprocess/
-│ └── manager.ts # Claude CLI subprocess management
+│ └── manager.ts # Claude CLI subprocess management (stdin-based)
├── session/
│ └── manager.ts # Session ID mapping
├── server/
@@ -187,23 +276,6 @@ src/
└── index.ts # Package exports
```
-## Security
-
-- Uses Node.js `spawn()` instead of shell execution to prevent injection attacks
-- No API keys stored or transmitted by this provider
-- All authentication handled by Claude CLI's secure keychain storage
-- Prompts passed as CLI arguments, not through shell interpretation
-
-## Cost Savings Example
-
-| Usage | API Cost | With This Provider |
-|-------|----------|-------------------|
-| 1M input tokens/month | ~$15 | $0 (included in Max) |
-| 500K output tokens/month | ~$37.50 | $0 (included in Max) |
-| **Monthly Total** | **~$52.50** | **$0 extra** |
-
-If you're already paying for Claude Max, this provider lets you use that subscription for API-style access at no additional cost.
-
## Troubleshooting
### "Claude CLI not found"
@@ -228,6 +300,37 @@ Check that the Claude CLI is in your PATH:
which claude
```
+### Enable debug logging
+
+To troubleshoot subprocess issues, enable detailed debug logging:
+```bash
+DEBUG_SUBPROCESS=true node dist/server/standalone.js
+```
+
+This will log:
+- Subprocess spawn events and PIDs
+- Stdout/stderr data flow
+- System prompt content
+- Assistant messages and results
+- Process exit codes
+
+## Cost Savings Example
+
+| Usage | API Cost | With This Proxy |
+|-------|----------|-------------------|
+| 1M input tokens/month | ~$15 | $0 (included in Max) |
+| 500K output tokens/month | ~$37.50 | $0 (included in Max) |
+| **Monthly Total** | **~$52.50** | **$0 extra** |
+
+If you're already paying for Claude Max, this proxy lets you use that subscription for API-style access at no additional cost.
+
+## Security
+
+- Uses Node.js `spawn()` instead of shell execution to prevent injection attacks
+- Prompts passed via stdin, not through shell interpretation or CLI arguments
+- No API keys stored or transmitted by this proxy
+- All authentication handled by Claude CLI's secure keychain storage
+
## Contributing
Contributions welcome! Please submit PRs with tests.
@@ -238,5 +341,7 @@ MIT
## Acknowledgments
+- Originally created by [atalovesyou](https://github.com/atalovesyou/claude-max-api-proxy)
- Built for use with [Clawdbot](https://clawd.bot)
+- Community contributors: [@wende](https://github.com/wende), [@kevinfealey](https://github.com/kevinfealey), [@jamshehan](https://github.com/jamshehan)
- Powered by [Claude Code CLI](https://github.com/anthropics/claude-code)
diff --git a/package-lock.json b/package-lock.json
index cfdfc81..f6561bd 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,17 +1,20 @@
{
- "name": "claude-code-cli-provider",
+ "name": "claude-max-api-proxy",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
- "name": "claude-code-cli-provider",
+ "name": "claude-max-api-proxy",
"version": "1.0.0",
"license": "MIT",
"dependencies": {
"express": "^4.21.2",
"uuid": "^11.0.5"
},
+ "bin": {
+ "claude-max-api": "dist/server/standalone.js"
+ },
"devDependencies": {
"@types/express": "^5.0.0",
"@types/node": "^22.10.7",
diff --git a/src/adapter/cli-to-openai.ts b/src/adapter/cli-to-openai.ts
index 1e43eab..bb5f638 100644
--- a/src/adapter/cli-to-openai.ts
+++ b/src/adapter/cli-to-openai.ts
@@ -102,7 +102,8 @@ export function cliResultToOpenai(
* Normalize Claude model names to a consistent format
* e.g., "claude-sonnet-4-5-20250929" -> "claude-sonnet-4"
*/
-function normalizeModelName(model: string): string {
+function normalizeModelName(model: string | undefined): string {
+ if (!model) return "claude-sonnet-4";
if (model.includes("opus")) return "claude-opus-4";
if (model.includes("sonnet")) return "claude-sonnet-4";
if (model.includes("haiku")) return "claude-haiku-4";
diff --git a/src/adapter/openai-to-cli.ts b/src/adapter/openai-to-cli.ts
index c8ecaa1..34c5a88 100644
--- a/src/adapter/openai-to-cli.ts
+++ b/src/adapter/openai-to-cli.ts
@@ -2,7 +2,10 @@
* Converts OpenAI chat request format to Claude CLI input
*/
-import type { OpenAIChatRequest } from "../types/openai.js";
+import type {
+ OpenAIChatRequest,
+ OpenAIContentPart,
+} from "../types/openai.js";
export type ClaudeModel = "opus" | "sonnet" | "haiku";
@@ -10,20 +13,34 @@ export interface CliInput {
prompt: string;
model: ClaudeModel;
sessionId?: string;
+ systemPrompt?: string;
+ tools?: string[];
}
const MODEL_MAP: Record<string, ClaudeModel> = {
// Direct model names
"claude-opus-4": "opus",
+ "claude-opus-4-6": "opus",
"claude-sonnet-4": "sonnet",
+ "claude-sonnet-4-5": "sonnet",
"claude-haiku-4": "haiku",
// With provider prefix
"claude-code-cli/claude-opus-4": "opus",
+ "claude-code-cli/claude-opus-4-6": "opus",
"claude-code-cli/claude-sonnet-4": "sonnet",
+ "claude-code-cli/claude-sonnet-4-5": "sonnet",
"claude-code-cli/claude-haiku-4": "haiku",
+ // Claude-max prefix (from OpenClaw config)
+ "claude-max/claude-opus-4": "opus",
+ "claude-max/claude-opus-4-6": "opus",
+ "claude-max/claude-sonnet-4": "sonnet",
+ "claude-max/claude-sonnet-4-5": "sonnet",
+ "claude-max/claude-haiku-4": "haiku",
// Aliases
"opus": "opus",
+ "opus-max": "opus",
"sonnet": "sonnet",
+ "sonnet-max": "sonnet",
"haiku": "haiku",
};
@@ -47,43 +64,101 @@ export function extractModel(model: string): ClaudeModel {
}
/**
- * Convert OpenAI messages array to a single prompt string for Claude CLI
+ * Extract text from message content.
*
- * Claude Code CLI in --print mode expects a single prompt, not a conversation.
- * We format the messages into a readable format that preserves context.
+ * OpenAI API allows content to be either a plain string or an array of
+ * content parts (e.g. [{type: "text", text: "..."}]). This function
+ * normalises both forms into a single string.
*/
-export function messagesToPrompt(messages: OpenAIChatRequest["messages"]): string {
- const parts: string[] = [];
+export function extractContent(
+ content: string | OpenAIContentPart[],
+): string {
+ if (typeof content === "string") return content;
+
+ if (Array.isArray(content)) {
+ return content
+ .map((part) => {
+ if (typeof part === "string") return part;
+ if (part && typeof part === "object") return part.text ?? "";
+ return "";
+ })
+ .filter(Boolean)
+ .join("\n");
+ }
+
+ return String(content ?? "");
+}
+
+/**
+ * Extract system messages and conversation from OpenAI messages array
+ *
+ * System messages should be passed via --append-system-prompt flag,
+ * not embedded in the user prompt (more reliable for OpenClaw integration).
+ */
+export function extractMessagesContent(messages: OpenAIChatRequest["messages"]): {
+ systemPrompt: string | undefined;
+ conversationPrompt: string;
+} {
+ const systemParts: string[] = [];
+ const conversationParts: string[] = [];
for (const msg of messages) {
+ const text = extractContent(msg.content);
+
switch (msg.role) {
case "system":
- // System messages become context instructions
- parts.push(`\n${msg.content}\n\n`);
+ case "developer":
+ // System/developer messages go to --append-system-prompt flag
+ // "developer" is OpenAI's newer role for system-level instructions
+ systemParts.push(text);
break;
case "user":
// User messages are the main prompt
- parts.push(msg.content);
+ conversationParts.push(text);
break;
case "assistant":
// Previous assistant responses for context
- parts.push(`\n${msg.content}\n\n`);
+ conversationParts.push(`\n${text}\n\n`);
break;
}
}
- return parts.join("\n").trim();
+ return {
+ systemPrompt: systemParts.length > 0 ? systemParts.join("\n\n").trim() : undefined,
+ conversationPrompt: conversationParts.join("\n").trim(),
+ };
+}
+
+/**
+ * Convert OpenAI messages array to a single prompt string for Claude CLI
+ *
+ * @deprecated Use extractMessagesContent instead for better system prompt handling
+ */
+export function messagesToPrompt(messages: OpenAIChatRequest["messages"]): string {
+ const { systemPrompt, conversationPrompt } = extractMessagesContent(messages);
+
+ if (systemPrompt) {
+ return `\n${systemPrompt}\n\n\n${conversationPrompt}`;
+ }
+
+ return conversationPrompt;
}
/**
* Convert OpenAI chat request to CLI input format
*/
export function openaiToCli(request: OpenAIChatRequest): CliInput {
+ const { systemPrompt, conversationPrompt } = extractMessagesContent(request.messages);
+
return {
- prompt: messagesToPrompt(request.messages),
+ prompt: conversationPrompt,
model: extractModel(request.model),
sessionId: request.user, // Use OpenAI's user field for session mapping
+ systemPrompt,
+ // TODO: Extract tool names from request.tools and map to Claude Code tool names
+ // For now, let Claude Code use all its builtin tools
+ tools: undefined,
};
}
diff --git a/src/server/routes.ts b/src/server/routes.ts
index ffe2e5b..abfcb93 100644
--- a/src/server/routes.ts
+++ b/src/server/routes.ts
@@ -135,11 +135,19 @@ async function handleStreamingResponse(
lastModel = message.message.model;
});
- subprocess.on("result", (_result: ClaudeCliResult) => {
+ subprocess.on("result", (result: ClaudeCliResult) => {
isComplete = true;
if (!res.writableEnded) {
- // Send final done chunk with finish_reason
- const doneChunk = createDoneChunk(requestId, lastModel);
+ // Send final chunk with finish_reason and usage data
+ const doneChunk = {
+ ...createDoneChunk(requestId, lastModel),
+ usage: {
+ prompt_tokens: result.usage?.input_tokens || 0,
+ completion_tokens: result.usage?.output_tokens || 0,
+ total_tokens:
+ (result.usage?.input_tokens || 0) + (result.usage?.output_tokens || 0),
+ },
+ };
res.write(`data: ${JSON.stringify(doneChunk)}\n\n`);
res.write("data: [DONE]\n\n");
res.end();
@@ -179,6 +187,8 @@ async function handleStreamingResponse(
subprocess.start(cliInput.prompt, {
model: cliInput.model,
sessionId: cliInput.sessionId,
+ systemPrompt: cliInput.systemPrompt,
+ tools: cliInput.tools,
}).catch((err) => {
console.error("[Streaming] Subprocess start error:", err);
reject(err);
@@ -234,6 +244,8 @@ async function handleNonStreamingResponse(
.start(cliInput.prompt, {
model: cliInput.model,
sessionId: cliInput.sessionId,
+ systemPrompt: cliInput.systemPrompt,
+ tools: cliInput.tools,
})
.catch((error) => {
res.status(500).json({
@@ -257,12 +269,24 @@ export function handleModels(_req: Request, res: Response): void {
res.json({
object: "list",
data: [
+ {
+ id: "claude-opus-4-6",
+ object: "model",
+ owned_by: "anthropic",
+ created: Math.floor(Date.now() / 1000),
+ },
{
id: "claude-opus-4",
object: "model",
owned_by: "anthropic",
created: Math.floor(Date.now() / 1000),
},
+ {
+ id: "claude-sonnet-4-5",
+ object: "model",
+ owned_by: "anthropic",
+ created: Math.floor(Date.now() / 1000),
+ },
{
id: "claude-sonnet-4",
object: "model",
diff --git a/src/subprocess/manager.ts b/src/subprocess/manager.ts
index 6551a81..6494d27 100644
--- a/src/subprocess/manager.ts
+++ b/src/subprocess/manager.ts
@@ -21,6 +21,8 @@ import type { ClaudeModel } from "../adapter/openai-to-cli.js";
export interface SubprocessOptions {
model: ClaudeModel;
sessionId?: string;
+ systemPrompt?: string;
+ tools?: string[];
cwd?: string;
timeout?: number;
}
@@ -36,17 +38,29 @@ export interface SubprocessEvents {
const DEFAULT_TIMEOUT = 300000; // 5 minutes
+// Debug logging controlled by environment variable
+const DEBUG = process.env.DEBUG_SUBPROCESS === "true";
+
export class ClaudeSubprocess extends EventEmitter {
private process: ChildProcess | null = null;
private buffer: string = "";
private timeoutId: NodeJS.Timeout | null = null;
private isKilled: boolean = false;
+ /**
+ * Conditional debug logging
+ */
+ private debug(...args: any[]): void {
+ if (DEBUG) {
+ console.error(...args);
+ }
+ }
+
/**
* Start the Claude CLI subprocess with the given prompt
*/
async start(prompt: string, options: SubprocessOptions): Promise<void> {
- const args = this.buildArgs(prompt, options);
+ const args = this.buildArgs(options);
const timeout = options.timeout || DEFAULT_TIMEOUT;
return new Promise<void>((resolve, reject) => {
@@ -81,15 +95,19 @@ export class ClaudeSubprocess extends EventEmitter {
}
});
- // Close stdin since we pass prompt as argument
- this.process.stdin?.end();
+ // Write prompt to stdin instead of passing as CLI argument
+ // This avoids E2BIG errors when prompts exceed the OS argument size limit
+ if (this.process.stdin) {
+ this.process.stdin.write(prompt);
+ this.process.stdin.end();
+ }
- console.error(`[Subprocess] Process spawned with PID: ${this.process.pid}`);
+ this.debug(`[Subprocess] Process spawned with PID: ${this.process.pid}`);
// Parse JSON stream from stdout
this.process.stdout?.on("data", (chunk: Buffer) => {
const data = chunk.toString();
- console.error(`[Subprocess] Received ${data.length} bytes of stdout`);
+ this.debug(`[Subprocess] Received ${data.length} bytes of stdout`);
this.buffer += data;
this.processBuffer();
});
@@ -100,13 +118,13 @@ export class ClaudeSubprocess extends EventEmitter {
if (errorText) {
// Don't emit as error unless it's actually an error
// Claude CLI may write debug info to stderr
- console.error("[Subprocess stderr]:", errorText.slice(0, 200));
+ this.debug("[Subprocess stderr]:", errorText.slice(0, 200));
}
});
// Handle process close
this.process.on("close", (code) => {
- console.error(`[Subprocess] Process closed with code: ${code}`);
+ this.debug(`[Subprocess] Process closed with code: ${code}`);
this.clearTimeout();
// Process any remaining buffer
if (this.buffer.trim()) {
@@ -127,7 +145,7 @@ export class ClaudeSubprocess extends EventEmitter {
/**
* Build CLI arguments array
*/
- private buildArgs(prompt: string, options: SubprocessOptions): string[] {
+ private buildArgs(options: SubprocessOptions): string[] {
const args = [
"--print", // Non-interactive mode
"--output-format",
@@ -137,13 +155,29 @@ export class ClaudeSubprocess extends EventEmitter {
"--model",
options.model, // Model alias (opus/sonnet/haiku)
"--no-session-persistence", // Don't save sessions
- prompt, // Pass prompt as argument (more reliable than stdin)
+ "--dangerously-skip-permissions", // Allow file operations (running as service)
];
+ // Add system prompt if provided (backstory/memories from OpenClaw)
+ if (options.systemPrompt) {
+ this.debug(`[Subprocess] System prompt: ${options.systemPrompt.length} chars`);
+ this.debug(`[Subprocess] System prompt content:\n${options.systemPrompt}`);
+ args.push("--append-system-prompt", options.systemPrompt);
+ } else {
+ this.debug("[Subprocess] NO system prompt provided");
+ }
+
+ // Add tool restrictions if provided
+ if (options.tools && options.tools.length > 0) {
+ args.push("--tools", options.tools.join(","));
+ }
+
if (options.sessionId) {
args.push("--session-id", options.sessionId);
}
+ // Prompt is passed via stdin to avoid E2BIG errors with large prompts
+
return args;
}
@@ -166,8 +200,10 @@ export class ClaudeSubprocess extends EventEmitter {
// Emit content delta for streaming
this.emit("content_delta", message as ClaudeCliStreamEvent);
} else if (isAssistantMessage(message)) {
+ this.debug(`[Response] Assistant message:`, JSON.stringify(message.message.content));
this.emit("assistant", message);
} else if (isResultMessage(message)) {
+ this.debug(`[Response] Result:`, message.result);
this.emit("result", message);
}
} catch {
diff --git a/src/types/openai.ts b/src/types/openai.ts
index c116658..5d435f4 100644
--- a/src/types/openai.ts
+++ b/src/types/openai.ts
@@ -3,9 +3,19 @@
* Used for Clawdbot integration
*/
+/**
+ * A single content part in a multi-part message.
+ * See: https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages
+ */
+export interface OpenAIContentPart {
+ type: "text" | "image_url";
+ text?: string;
+ image_url?: { url: string; detail?: string };
+}
+
export interface OpenAIChatMessage {
- role: "system" | "user" | "assistant";
- content: string;
+ role: "system" | "developer" | "user" | "assistant";
+ content: string | OpenAIContentPart[];
}
export interface OpenAIChatRequest {