Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 14 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -228,6 +228,20 @@ Check that the Claude CLI is in your PATH:
which claude
```

### Enable debug logging

To troubleshoot subprocess issues, enable detailed debug logging:
```bash
DEBUG_SUBPROCESS=true node dist/server/standalone.js
```

This will log:
- Subprocess spawn events and PIDs
- Stdout/stderr data flow
- System prompt content
- Assistant messages and results
- Process exit codes

## Contributing

Contributions welcome! Please submit PRs with tests.
Expand Down
7 changes: 5 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

87 changes: 75 additions & 12 deletions src/adapter/openai-to-cli.ts
Original file line number Diff line number Diff line change
Expand Up @@ -2,14 +2,19 @@
* Converts OpenAI chat request format to Claude CLI input
*/

import type {
  OpenAIChatRequest,
  OpenAIContentPart,
} from "../types/openai.js";

/** Model aliases accepted by the Claude CLI's --model flag. */
export type ClaudeModel = "opus" | "sonnet" | "haiku";

/** Normalised input handed to the Claude CLI subprocess layer. */
export interface CliInput {
/** Conversation text passed to the CLI as the prompt argument. */
prompt: string;
/** Model alias forwarded via --model. */
model: ClaudeModel;
/** Session identifier (mapped from OpenAI's `user` field). */
sessionId?: string;
/** System/developer content forwarded via --append-system-prompt. */
systemPrompt?: string;
/** Allowed tool names, joined for the CLI's --tools flag. */
tools?: string[];
}

const MODEL_MAP: Record<string, ClaudeModel> = {
Expand Down Expand Up @@ -47,43 +52,101 @@ export function extractModel(model: string): ClaudeModel {
}

/**
* Convert OpenAI messages array to a single prompt string for Claude CLI
* Extract text from message content.
*
* Claude Code CLI in --print mode expects a single prompt, not a conversation.
* We format the messages into a readable format that preserves context.
* OpenAI API allows content to be either a plain string or an array of
* content parts (e.g. [{type: "text", text: "..."}]). This function
* normalises both forms into a single string.
*/
export function messagesToPrompt(messages: OpenAIChatRequest["messages"]): string {
const parts: string[] = [];
export function extractContent(
content: string | OpenAIContentPart[],
): string {
if (typeof content === "string") return content;

if (Array.isArray(content)) {
return content
.map((part) => {
if (typeof part === "string") return part;
if (part && typeof part === "object") return part.text ?? "";
return "";
})
.filter(Boolean)
.join("\n");
}

return String(content ?? "");
}

/**
 * Extract system messages and conversation from OpenAI messages array.
 *
 * System messages should be passed via --append-system-prompt flag,
 * not embedded in the user prompt (more reliable for OpenClaw integration).
 *
 * @returns `systemPrompt` — system/developer content joined by blank lines,
 *          or undefined when there is none; `conversationPrompt` — user
 *          turns verbatim plus prior assistant turns wrapped in
 *          <previous_response> tags.
 */
export function extractMessagesContent(messages: OpenAIChatRequest["messages"]): {
  systemPrompt: string | undefined;
  conversationPrompt: string;
} {
  const systemParts: string[] = [];
  const conversationParts: string[] = [];

  for (const msg of messages) {
    const text = extractContent(msg.content);

    switch (msg.role) {
      case "system":
      case "developer":
        // System/developer messages go to --append-system-prompt flag.
        // "developer" is OpenAI's newer role for system-level instructions.
        systemParts.push(text);
        break;

      case "user":
        // User messages are the main prompt.
        conversationParts.push(text);
        break;

      case "assistant":
        // Previous assistant responses for context.
        conversationParts.push(`<previous_response>\n${text}\n</previous_response>\n`);
        break;
    }
  }

  return {
    systemPrompt: systemParts.length > 0 ? systemParts.join("\n\n").trim() : undefined,
    conversationPrompt: conversationParts.join("\n").trim(),
  };
}

/**
 * Convert OpenAI messages array to a single prompt string for Claude CLI.
 *
 * Any system/developer content is hoisted into a leading <system> block;
 * the conversation text follows.
 *
 * @deprecated Use extractMessagesContent instead for better system prompt handling
 */
export function messagesToPrompt(messages: OpenAIChatRequest["messages"]): string {
  const extracted = extractMessagesContent(messages);

  return extracted.systemPrompt
    ? `<system>\n${extracted.systemPrompt}\n</system>\n\n${extracted.conversationPrompt}`
    : extracted.conversationPrompt;
}

/**
 * Convert OpenAI chat request to CLI input format.
 *
 * System/developer content is carried separately in `systemPrompt` so the
 * subprocess layer can pass it via --append-system-prompt instead of
 * embedding it in the prompt text.
 */
export function openaiToCli(request: OpenAIChatRequest): CliInput {
  const { systemPrompt, conversationPrompt } = extractMessagesContent(request.messages);

  return {
    prompt: conversationPrompt,
    model: extractModel(request.model),
    sessionId: request.user, // Use OpenAI's user field for session mapping
    systemPrompt,
    // TODO: Extract tool names from request.tools and map to Claude Code tool names
    // For now, let Claude Code use all its builtin tools
    tools: undefined,
  };
}
4 changes: 4 additions & 0 deletions src/server/routes.ts
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,8 @@ async function handleStreamingResponse(
subprocess.start(cliInput.prompt, {
model: cliInput.model,
sessionId: cliInput.sessionId,
systemPrompt: cliInput.systemPrompt,
tools: cliInput.tools,
}).catch((err) => {
console.error("[Streaming] Subprocess start error:", err);
reject(err);
Expand Down Expand Up @@ -234,6 +236,8 @@ async function handleNonStreamingResponse(
.start(cliInput.prompt, {
model: cliInput.model,
sessionId: cliInput.sessionId,
systemPrompt: cliInput.systemPrompt,
tools: cliInput.tools,
})
.catch((error) => {
res.status(500).json({
Expand Down
43 changes: 38 additions & 5 deletions src/subprocess/manager.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ import type { ClaudeModel } from "../adapter/openai-to-cli.js";
/** Options controlling how the Claude CLI subprocess is launched. */
export interface SubprocessOptions {
/** Model alias passed to --model (opus/sonnet/haiku). */
model: ClaudeModel;
/** Session identifier forwarded via --session-id. */
sessionId?: string;
/** Content for --append-system-prompt (backstory/memories from OpenClaw). */
systemPrompt?: string;
/** Allowed tool names, comma-joined for the --tools flag. */
tools?: string[];
/** Working directory for the spawned process — assumed from name; spawn call not fully visible here, confirm. */
cwd?: string;
/** Per-request timeout in ms — presumably overrides DEFAULT_TIMEOUT (5 min); verify in start(). */
timeout?: number;
}
Expand All @@ -36,12 +38,24 @@ export interface SubprocessEvents {

// Default per-request subprocess timeout.
const DEFAULT_TIMEOUT = 300000; // 5 minutes

// Debug logging controlled by environment variable (DEBUG_SUBPROCESS=true).
const DEBUG = process.env.DEBUG_SUBPROCESS === "true";

export class ClaudeSubprocess extends EventEmitter {
private process: ChildProcess | null = null;
private buffer: string = "";
private timeoutId: NodeJS.Timeout | null = null;
private isKilled: boolean = false;

/**
 * Conditional debug logging: forwards to console.error only when the
 * DEBUG_SUBPROCESS environment variable is set to "true".
 *
 * Uses `unknown[]` rather than `any[]` to stay type-safe for callers while
 * remaining compatible with console.error's rest parameter.
 */
private debug(...args: unknown[]): void {
  if (DEBUG) {
    console.error(...args);
  }
}

/**
* Start the Claude CLI subprocess with the given prompt
*/
Expand Down Expand Up @@ -84,12 +98,12 @@ export class ClaudeSubprocess extends EventEmitter {
// Close stdin since we pass prompt as argument
this.process.stdin?.end();

console.error(`[Subprocess] Process spawned with PID: ${this.process.pid}`);
this.debug(`[Subprocess] Process spawned with PID: ${this.process.pid}`);

// Parse JSON stream from stdout
this.process.stdout?.on("data", (chunk: Buffer) => {
const data = chunk.toString();
console.error(`[Subprocess] Received ${data.length} bytes of stdout`);
this.debug(`[Subprocess] Received ${data.length} bytes of stdout`);
this.buffer += data;
this.processBuffer();
});
Expand All @@ -100,13 +114,13 @@ export class ClaudeSubprocess extends EventEmitter {
if (errorText) {
// Don't emit as error unless it's actually an error
// Claude CLI may write debug info to stderr
console.error("[Subprocess stderr]:", errorText.slice(0, 200));
this.debug("[Subprocess stderr]:", errorText.slice(0, 200));
}
});

// Handle process close
this.process.on("close", (code) => {
console.error(`[Subprocess] Process closed with code: ${code}`);
this.debug(`[Subprocess] Process closed with code: ${code}`);
this.clearTimeout();
// Process any remaining buffer
if (this.buffer.trim()) {
Expand Down Expand Up @@ -137,13 +151,30 @@ export class ClaudeSubprocess extends EventEmitter {
"--model",
options.model, // Model alias (opus/sonnet/haiku)
"--no-session-persistence", // Don't save sessions
prompt, // Pass prompt as argument (more reliable than stdin)
"--dangerously-skip-permissions", // Allow file operations (running as service)
];

// Add system prompt if provided (backstory/memories from OpenClaw)
if (options.systemPrompt) {
this.debug(`[Subprocess] System prompt: ${options.systemPrompt.length} chars`);
this.debug(`[Subprocess] System prompt content:\n${options.systemPrompt}`);
args.push("--append-system-prompt", options.systemPrompt);
} else {
this.debug("[Subprocess] NO system prompt provided");
}

// Add tool restrictions if provided
if (options.tools && options.tools.length > 0) {
args.push("--tools", options.tools.join(","));
}

if (options.sessionId) {
args.push("--session-id", options.sessionId);
}

// Prompt goes last
args.push(prompt);

return args;
}

Expand All @@ -166,8 +197,10 @@ export class ClaudeSubprocess extends EventEmitter {
// Emit content delta for streaming
this.emit("content_delta", message as ClaudeCliStreamEvent);
} else if (isAssistantMessage(message)) {
this.debug(`[Response] Assistant message:`, JSON.stringify(message.message.content));
this.emit("assistant", message);
} else if (isResultMessage(message)) {
this.debug(`[Response] Result:`, message.result);
this.emit("result", message);
}
} catch {
Expand Down
14 changes: 12 additions & 2 deletions src/types/openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,19 @@
* Used for Clawdbot integration
*/

/**
 * A single content part in a multi-part message.
 * See: https://platform.openai.com/docs/api-reference/chat/create#chat-create-messages
 */
export interface OpenAIContentPart {
/** Discriminator: "text" parts carry `text`; "image_url" parts carry `image_url`. */
type: "text" | "image_url";
/** Present when type === "text". */
text?: string;
/** Present when type === "image_url". */
image_url?: { url: string; detail?: string };
}

/** One turn in an OpenAI-format chat conversation. */
export interface OpenAIChatMessage {
  /** "developer" is OpenAI's newer role for system-level instructions. */
  role: "system" | "developer" | "user" | "assistant";
  /** Either a plain string or an array of typed content parts. */
  content: string | OpenAIContentPart[];
}

export interface OpenAIChatRequest {
Expand Down