Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 28 additions & 1 deletion src/adapter/cli-to-openai.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,33 @@
import type { ClaudeCliAssistant, ClaudeCliResult } from "../types/claude-cli.js";
import type { OpenAIChatResponse, OpenAIChatChunk } from "../types/openai.js";

/**
 * Extract JSON content from a model response.
 *
 * Claude often wraps JSON in markdown code fences and surrounds it with
 * prose. Strategy:
 *   1. Scan all fenced code blocks; prefer the LAST one whose contents
 *      parse as JSON (a trailing fence may be a non-JSON example).
 *   2. If fences exist but none parse, return the last fence's contents
 *      (trimmed) as a best effort.
 *   3. Otherwise look for a raw JSON object/array embedded in the text.
 *   4. Fall back to returning the original text unchanged.
 */
function stripCodeFences(text: string): string {
  // The closing fence is not always preceded by a newline ("…}```" is
  // common model output), so do not require "\n```" before the close.
  const fences = [...text.matchAll(/```[^\n]*\n([\s\S]*?)```/g)].map((m) => m[1].trim());

  // Walk fences from last to first and return the first valid JSON payload.
  for (let i = fences.length - 1; i >= 0; i--) {
    try {
      JSON.parse(fences[i]);
      return fences[i];
    } catch {
      // Not JSON — keep scanning earlier fences.
    }
  }
  if (fences.length > 0) {
    // No fence held valid JSON; preserve the old behavior of returning
    // the last fenced block.
    return fences[fences.length - 1];
  }

  // No fences: try to extract a raw JSON object or array from the text.
  const jsonMatch = text.match(/(\{[\s\S]*\}|\[[\s\S]*\])/);
  if (jsonMatch) {
    try {
      JSON.parse(jsonMatch[1]);
      return jsonMatch[1];
    } catch {
      // Not valid JSON — fall through to the original text.
    }
  }

  return text;
}

/**
* Extract text content from Claude CLI assistant message
*/
Expand Down Expand Up @@ -84,7 +111,7 @@ export function cliResultToOpenai(
index: 0,
message: {
role: "assistant",
content: result.result,
content: stripCodeFences(result.result),
},
finish_reason: "stop",
},
Expand Down
183 changes: 164 additions & 19 deletions src/adapter/openai-to-cli.ts
Original file line number Diff line number Diff line change
@@ -1,13 +1,52 @@
/**
* Converts OpenAI chat request format to Claude CLI input
*
* Supports two modes:
* - Text-only: prompt passed as CLI argument (legacy)
* - Stream-JSON: NDJSON piped to stdin with full multimodal support (images)
*/

import type { OpenAIChatRequest } from "../types/openai.js";
import type { OpenAIChatRequest, OpenAIChatContentPart } from "../types/openai.js";

/** Model aliases accepted by the Claude CLI's --model flag. */
export type ClaudeModel = "opus" | "sonnet" | "haiku";

/**
 * Claude CLI stream-json content block types.
 * Plain-text block.
 */
interface CliTextContent {
  type: "text";
  text: string;
}

/** Inline base64-encoded image block. */
interface CliImageContent {
  type: "image";
  source: {
    type: "base64";
    // MIME type of the image, e.g. "image/png".
    media_type: string;
    // Raw base64 payload (no "data:...;base64," prefix).
    data: string;
  };
}

/** Union of block types that may appear in a stream-json message. */
type CliContentBlock = CliTextContent | CliImageContent;

/**
 * NDJSON message format for Claude CLI --input-format stream-json.
 * Each stdin line is one serialized CliStreamMessage; this mode only
 * accepts user-role messages.
 */
export interface CliStreamMessage {
  type: "user";
  message: {
    role: "user";
    content: CliContentBlock[];
  };
}

/** Everything needed to invoke the Claude CLI for one chat request. */
export interface CliInput {
  /** Single prompt string (legacy text-only mode) */
  prompt: string;
  /** NDJSON lines for stdin piping (stream-json mode with image support) */
  stdinMessages: string[];
  /** Whether the request contains images and needs stream-json mode */
  hasImages: boolean;
  /** Claude model alias to pass to the CLI. */
  model: ClaudeModel;
  /** Session identifier (populated from the OpenAI request's `user` field). */
  sessionId?: string;
}
Expand All @@ -31,45 +70,146 @@ const MODEL_MAP: Record<string, ClaudeModel> = {
* Extract Claude model alias from request model string
*/
/**
 * Resolve a request model string to a Claude CLI model alias.
 *
 * Tries an exact lookup first, then retries with the "claude-code-cli/"
 * provider prefix stripped, and finally falls back to "opus" (the
 * Claude Max subscription default) for unknown model strings.
 */
export function extractModel(model: string): ClaudeModel {
  // Exact alias match wins.
  const direct = MODEL_MAP[model];
  if (direct) {
    return direct;
  }

  // Retry with the provider prefix removed.
  const unprefixed = MODEL_MAP[model.replace(/^claude-code-cli\//, "")];
  if (unprefixed) {
    return unprefixed;
  }

  // Unknown model — default to opus (Claude Max subscription).
  return "opus";
}

/**
 * Report whether any message in the request carries an image part.
 * String-content messages can never contain images.
 */
function requestHasImages(messages: OpenAIChatRequest["messages"]): boolean {
  for (const msg of messages) {
    if (typeof msg.content === "string") {
      continue;
    }
    if (msg.content.some((part) => part.type === "image_url")) {
      return true;
    }
  }
  return false;
}

/**
 * Flatten OpenAI message content into plain text.
 *
 * String content is returned as-is; for content-part arrays, non-empty
 * text parts are joined with newlines (image parts are ignored).
 */
function extractText(content: string | OpenAIChatContentPart[]): string {
  if (typeof content === "string") {
    return content;
  }

  const pieces: string[] = [];
  for (const part of content) {
    if (part.type === "text" && part.text) {
      pieces.push(part.text);
    }
  }
  return pieces.join("\n");
}

/**
 * Convert an OpenAI image_url content part to a Claude CLI base64 image block.
 *
 * Only data URIs (`data:image/png;base64,...`) are supported; the CLI
 * cannot fetch remote images, so http(s) URLs are skipped with a warning.
 *
 * @param part - OpenAI content part; non-image parts yield null
 * @returns A CLI image block, or null when the part is not a usable image
 */
function convertImagePart(part: OpenAIChatContentPart): CliImageContent | null {
  if (part.type !== "image_url" || !part.image_url) return null;

  const url = part.image_url.url;

  // Parse data URI: data:image/png;base64,iVBOR...
  // Media-type subtypes may contain digits, dots, and dashes as well as "+"
  // (e.g. image/svg+xml, image/vnd.microsoft.icon, image/jp2), so match
  // [\w.+-]+ rather than letters-only, which silently dropped such images.
  const match = url.match(/^data:(image\/[\w.+-]+);base64,(.+)$/i);
  if (!match) {
    // Non-data-URI images (http URLs) are not supported by CLI
    console.error("[openai-to-cli] Skipping non-data-URI image:", url.slice(0, 60));
    return null;
  }

  return {
    type: "image",
    source: {
      type: "base64",
      media_type: match[1],
      data: match[2],
    },
  };
}

/**
 * Convert OpenAI content (plain string or content-part array) into Claude
 * CLI content blocks. Empty text parts and unusable images are dropped.
 */
function convertContentParts(content: string | OpenAIChatContentPart[]): CliContentBlock[] {
  if (typeof content === "string") {
    return [{ type: "text", text: content }];
  }

  const blocks: CliContentBlock[] = [];
  for (const part of content) {
    if (part.type === "text") {
      if (part.text) {
        blocks.push({ type: "text", text: part.text });
      }
      continue;
    }
    if (part.type === "image_url") {
      const image = convertImagePart(part);
      if (image !== null) {
        blocks.push(image);
      }
    }
  }
  return blocks;
}

/**
 * Convert OpenAI messages to Claude CLI stream-json NDJSON lines.
 *
 * The CLI's stream-json input only accepts "user" role messages, so system
 * and assistant turns are inlined into one user message as tagged text
 * blocks (<system>…</system> / <previous_response>…</previous_response>).
 */
function messagesToStreamJson(messages: OpenAIChatRequest["messages"]): string[] {
  // Every turn is folded into the content of a single user-role message.
  const content: CliContentBlock[] = [];

  for (const msg of messages) {
    if (msg.role === "system") {
      content.push({
        type: "text",
        text: `<system>\n${extractText(msg.content)}\n</system>`,
      });
    } else if (msg.role === "assistant") {
      content.push({
        type: "text",
        text: `<previous_response>\n${extractText(msg.content)}\n</previous_response>`,
      });
    } else if (msg.role === "user") {
      content.push(...convertContentParts(msg.content));
    }
  }

  const line: CliStreamMessage = {
    type: "user",
    message: {
      role: "user",
      content,
    },
  };

  return [JSON.stringify(line)];
}

/**
* Convert OpenAI messages to a single prompt string (legacy text-only mode)
*/
export function messagesToPrompt(messages: OpenAIChatRequest["messages"]): string {
const parts: string[] = [];

for (const msg of messages) {
const text = extractText(msg.content);
switch (msg.role) {
case "system":
// System messages become context instructions
parts.push(`<system>\n${msg.content}\n</system>\n`);
parts.push(`<system>\n${text}\n</system>\n`);
break;

case "user":
// User messages are the main prompt
parts.push(msg.content);
parts.push(text);
break;

case "assistant":
// Previous assistant responses for context
parts.push(`<previous_response>\n${msg.content}\n</previous_response>\n`);
parts.push(`<previous_response>\n${text}\n</previous_response>\n`);
break;
}
}
Expand All @@ -78,12 +218,17 @@ export function messagesToPrompt(messages: OpenAIChatRequest["messages"]): strin
}

/**
 * Convert an OpenAI chat request into Claude CLI input.
 *
 * Both the legacy prompt string and the stream-json NDJSON lines are
 * produced; callers switch on `hasImages` to select stream-json mode
 * when the request carries image content.
 */
export function openaiToCli(request: OpenAIChatRequest): CliInput {
  const { messages, model, user } = request;

  return {
    prompt: messagesToPrompt(messages),
    stdinMessages: messagesToStreamJson(messages),
    hasImages: requestHasImages(messages),
    model: extractModel(model),
    // OpenAI's `user` field doubles as the session identifier.
    sessionId: user,
  };
}
2 changes: 1 addition & 1 deletion src/server/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ function createApp(): Express {
const app = express();

// Middleware
app.use(express.json({ limit: "10mb" }));
app.use(express.json({ limit: "50mb" })); // Large limit for base64 images

// Request logging (debug mode)
app.use((req: Request, _res: Response, next: NextFunction) => {
Expand Down
2 changes: 2 additions & 0 deletions src/server/routes.ts
Original file line number Diff line number Diff line change
Expand Up @@ -179,6 +179,7 @@ async function handleStreamingResponse(
subprocess.start(cliInput.prompt, {
model: cliInput.model,
sessionId: cliInput.sessionId,
stdinMessages: cliInput.hasImages ? cliInput.stdinMessages : undefined,
}).catch((err) => {
console.error("[Streaming] Subprocess start error:", err);
reject(err);
Expand Down Expand Up @@ -234,6 +235,7 @@ async function handleNonStreamingResponse(
.start(cliInput.prompt, {
model: cliInput.model,
sessionId: cliInput.sessionId,
stdinMessages: cliInput.hasImages ? cliInput.stdinMessages : undefined,
})
.catch((error) => {
res.status(500).json({
Expand Down
Loading