50 changes: 32 additions & 18 deletions .github/actions/setup-node-frontend/action.yml
@@ -65,13 +65,25 @@ runs:
exit 1
fi

# Remove any existing node_modules in apps/frontend (npm workspaces may create partial one)
if [ -d "apps/frontend/node_modules" ] && [ ! -L "apps/frontend/node_modules" ]; then
echo "Removing partial node_modules created by npm workspaces..."
rm -rf "apps/frontend/node_modules"
# Remove any existing node_modules in apps/frontend
# This handles: partial directories from npm workspaces, AND broken symlinks
if [ -e "apps/frontend/node_modules" ] || [ -L "apps/frontend/node_modules" ]; then
# Check if it's a valid symlink pointing to root node_modules
if [ -L "apps/frontend/node_modules" ]; then
target=$(readlink apps/frontend/node_modules 2>/dev/null || echo "")
if [ "$target" = "../../node_modules" ] && [ -d "apps/frontend/node_modules" ]; then
echo "Correct symlink already exists: apps/frontend/node_modules -> ../../node_modules"
else
echo "Removing incorrect/broken symlink (was: $target)..."
rm -f "apps/frontend/node_modules"
fi
else
echo "Removing partial node_modules directory created by npm workspaces..."
rm -rf "apps/frontend/node_modules"
fi
fi

# Create link if it doesn't exist
# Create link if it doesn't exist or was removed
if [ ! -L "apps/frontend/node_modules" ]; then
if [ "$RUNNER_OS" == "Windows" ]; then
# Use directory junction on Windows (works without admin privileges)
@@ -91,19 +91,21 @@
exit 1
fi
fi
else
echo "apps/frontend/node_modules symlink already exists"
fi

# Verify the link works
if [ -L "apps/frontend/node_modules" ]; then
target=$(readlink apps/frontend/node_modules 2>/dev/null || echo "junction")
echo "Verified: apps/frontend/node_modules -> $target"
# Check that the target resolves correctly
if [ -d "apps/frontend/node_modules" ]; then
count=$(ls apps/frontend/node_modules 2>/dev/null | wc -l)
echo "Link resolves to directory with $count entries"
else
echo "::warning::Link exists but does not resolve to a valid directory"
fi
# Final verification - the link must exist and resolve correctly
# Note: On Windows, junctions don't show as symlinks (-L), so we check if the directory exists
# and can be listed. On Unix, we also verify it's a symlink.
if [ "$RUNNER_OS" != "Windows" ] && [ ! -L "apps/frontend/node_modules" ]; then
echo "::error::apps/frontend/node_modules symlink was not created"
exit 1
fi
# Verify the link resolves to a valid directory with content
if ! ls apps/frontend/node_modules/electron >/dev/null 2>&1; then
echo "::error::apps/frontend/node_modules does not resolve correctly (electron not found)"
ls -la apps/frontend/ || true
ls apps/frontend/node_modules 2>&1 | head -5 || true
exit 1
fi
count=$(ls apps/frontend/node_modules 2>/dev/null | wc -l)
echo "Verified: apps/frontend/node_modules resolves correctly ($count entries)"
45 changes: 44 additions & 1 deletion apps/backend/.env.example
@@ -186,7 +186,7 @@ GRAPHITI_ENABLED=true
# Choose which providers to use for LLM and embeddings.
# Default is "openai" for both.

# LLM provider: openai | anthropic | azure_openai | ollama | google | openrouter
# LLM provider: openai | anthropic | azure_openai | ollama | google | openrouter | zai
# GRAPHITI_LLM_PROVIDER=openai

# Embedder provider: openai | voyage | azure_openai | ollama | google | openrouter
@@ -201,6 +201,9 @@ GRAPHITI_ENABLED=true
# OpenAI API Key
# OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

# Custom OpenAI Base URL (OPTIONAL - for LocalAI or other compatible APIs)
# OPENAI_BASE_URL=

# OpenAI Model for LLM (default: gpt-4o-mini)
# OPENAI_MODEL=gpt-4o-mini

@@ -276,6 +279,24 @@ GRAPHITI_ENABLED=true
# OpenRouter Embedding Model (default: openai/text-embedding-3-small)
# OPENROUTER_EMBEDDING_MODEL=openai/text-embedding-3-small

# =============================================================================
# GRAPHITI: Z.AI Provider
# =============================================================================
# Use Z.AI code generation models (GLM-4 based).
# Get API key from: https://www.z.ai/
#
# Required: ZAI_API_KEY
# Note: Z.AI uses an OpenAI-compatible API structure.

# Z.AI API Key
# ZAI_API_KEY=zai-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx

# Z.AI Base URL (default: https://api.z.ai/api/coding/paas/v4)
# ZAI_BASE_URL=https://api.z.ai/api/coding/paas/v4

# Z.AI Model (default: GLM-4.7)
# ZAI_MODEL=GLM-4.7

# =============================================================================
# GRAPHITI: Azure OpenAI Provider
# =============================================================================
@@ -333,6 +354,28 @@ GRAPHITI_ENABLED=true
# GRAPHITI_EMBEDDER_PROVIDER=openai
# OPENAI_API_KEY=sk-xxxxxxxx
#
# --- Example 1b: Z.AI via OpenAI provider (custom base URL) ---
# Use this approach when you want to use Z.AI models through the OpenAI provider.
# This is useful if you want to use Z.AI for LLM and a different embedder (like Voyage).
# GRAPHITI_ENABLED=true
# GRAPHITI_LLM_PROVIDER=openai
# GRAPHITI_EMBEDDER_PROVIDER=voyage
# OPENAI_API_KEY=your-z-ai-key
# OPENAI_BASE_URL=https://api.z.ai/api/coding/paas/v4
# OPENAI_MODEL=GLM-4.7
# VOYAGE_API_KEY=pa-xxxxxxxx
#
# --- Example 1c: Z.AI via dedicated zai provider ---
# Use this approach for native Z.AI handling with dedicated configuration.
# This keeps Z.AI credentials separate from OpenAI credentials.
# GRAPHITI_ENABLED=true
# GRAPHITI_LLM_PROVIDER=zai
# GRAPHITI_EMBEDDER_PROVIDER=voyage
# ZAI_API_KEY=your-z-ai-key
# ZAI_BASE_URL=https://api.z.ai/api/coding/paas/v4
# ZAI_MODEL=GLM-4.7
# VOYAGE_API_KEY=pa-xxxxxxxx
#
# --- Example 2: Anthropic + Voyage (high quality) ---
# GRAPHITI_ENABLED=true
# GRAPHITI_LLM_PROVIDER=anthropic
8 changes: 8 additions & 0 deletions apps/backend/cli/utils.py
@@ -86,8 +86,16 @@ def setup_environment() -> Path:
sys.path.insert(0, str(script_dir))

# Load .env files - CWD first, then auto-claude/ and dev/auto-claude/ locations
cwd_env_file = Path.cwd() / ".env"
env_file = script_dir / ".env"
dev_env_file = script_dir.parent / "dev" / "auto-claude" / ".env"

# Load from CWD first (allows project-specific overrides)
if cwd_env_file.exists() and cwd_env_file.resolve() != env_file.resolve():
load_dotenv(cwd_env_file)

# Then load base config (fills in missing values)
if env_file.exists():
load_dotenv(env_file)
elif dev_env_file.exists():
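
The load order works because python-dotenv's `load_dotenv()` defaults to `override=False`: keys already present in `os.environ` are left alone, so whichever file loads first wins. A small self-contained sketch of that behavior (file names are made up for illustration):

```python
# Sketch of python-dotenv's first-loader-wins behavior with override=False.
# The .env file names are hypothetical; only the override semantics matter.
import os
from pathlib import Path
from dotenv import load_dotenv

Path("project.env").write_text("GRAPHITI_LLM_PROVIDER=zai\n")
Path("base.env").write_text("GRAPHITI_LLM_PROVIDER=openai\n")

os.environ.pop("GRAPHITI_LLM_PROVIDER", None)
load_dotenv("project.env")  # loads first and sets the key
load_dotenv("base.env")     # override=False by default: existing key is kept

# The project-local value survives because load_dotenv() does not override
# variables that are already set.
assert os.environ["GRAPHITI_LLM_PROVIDER"] == "zai"
```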
23 changes: 23 additions & 0 deletions apps/backend/integrations/graphiti/config.py
@@ -90,6 +90,7 @@ class LLMProvider(str, Enum):
OLLAMA = "ollama"
GOOGLE = "google"
OPENROUTER = "openrouter"
ZAI = "zai"


class EmbedderProvider(str, Enum):
@@ -121,6 +122,7 @@ class GraphitiConfig:

# OpenAI settings
openai_api_key: str = ""
openai_base_url: str = "" # Custom base URL (e.g., for Z.AI, OpenRouter, etc.)
openai_model: str = "gpt-5-mini"
openai_embedding_model: str = "text-embedding-3-small"

@@ -149,6 +151,11 @@ class GraphitiConfig:
openrouter_llm_model: str = "anthropic/claude-sonnet-4"
openrouter_embedding_model: str = "openai/text-embedding-3-small"

# Z.AI settings (OpenAI-compatible)
zai_api_key: str = ""
zai_base_url: str = "https://api.z.ai/api/coding/paas/v4"
zai_model: str = "GLM-4.7"

# Ollama settings (local)
ollama_base_url: str = DEFAULT_OLLAMA_BASE_URL
ollama_llm_model: str = ""
@@ -174,6 +181,7 @@ def from_env(cls) -> "GraphitiConfig":

# OpenAI settings
openai_api_key = os.environ.get("OPENAI_API_KEY", "")
openai_base_url = os.environ.get("OPENAI_BASE_URL", "")
openai_model = os.environ.get("OPENAI_MODEL", "gpt-5-mini")
openai_embedding_model = os.environ.get(
"OPENAI_EMBEDDING_MODEL", "text-embedding-3-small"
@@ -216,6 +224,13 @@ def from_env(cls) -> "GraphitiConfig":
"OPENROUTER_EMBEDDING_MODEL", "openai/text-embedding-3-small"
)

# Z.AI settings
zai_api_key = os.environ.get("ZAI_API_KEY", "")
zai_base_url = os.environ.get(
"ZAI_BASE_URL", "https://api.z.ai/api/coding/paas/v4"
)
zai_model = os.environ.get("ZAI_MODEL", "GLM-4.7")

# Ollama settings
ollama_base_url = os.environ.get("OLLAMA_BASE_URL", DEFAULT_OLLAMA_BASE_URL)
ollama_llm_model = os.environ.get("OLLAMA_LLM_MODEL", "")
@@ -234,6 +249,7 @@ def from_env(cls) -> "GraphitiConfig":
database=database,
db_path=db_path,
openai_api_key=openai_api_key,
openai_base_url=openai_base_url,
openai_model=openai_model,
openai_embedding_model=openai_embedding_model,
anthropic_api_key=anthropic_api_key,
@@ -251,6 +267,9 @@ def from_env(cls) -> "GraphitiConfig":
openrouter_base_url=openrouter_base_url,
openrouter_llm_model=openrouter_llm_model,
openrouter_embedding_model=openrouter_embedding_model,
zai_api_key=zai_api_key,
zai_base_url=zai_base_url,
zai_model=zai_model,
ollama_base_url=ollama_base_url,
ollama_llm_model=ollama_llm_model,
ollama_embedding_model=ollama_embedding_model,
@@ -685,6 +704,10 @@ def get_available_providers() -> dict:
available_llm.append("openrouter")
available_embedder.append("openrouter")

# Check Z.AI (requires both API key and base URL)
if config.zai_api_key and config.zai_base_url:
available_llm.append("zai")

# Check Ollama
if config.ollama_llm_model:
available_llm.append("ollama")
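
Because `zai_base_url` and `zai_model` ship with defaults, setting `ZAI_API_KEY` alone is enough for `get_available_providers()` to advertise `zai` as an LLM provider. A quick sketch, with an assumed import path for `GraphitiConfig`:

```python
# Assumed import path - adjust to wherever GraphitiConfig actually lives.
import os
from integrations.graphiti.config import GraphitiConfig

os.environ["ZAI_API_KEY"] = "zai-xxxxxxxx"  # placeholder key
config = GraphitiConfig.from_env()

# Both fields fall back to their defaults, so the key alone satisfies
# the "zai" availability check in get_available_providers().
assert config.zai_base_url == "https://api.z.ai/api/coding/paas/v4"
assert config.zai_model == "GLM-4.7"
```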
@@ -39,9 +39,14 @@ def create_openai_embedder(config: "GraphitiConfig") -> Any:
if not config.openai_api_key:
raise ProviderError("OpenAI embedder requires OPENAI_API_KEY")

embedder_config = OpenAIEmbedderConfig(
api_key=config.openai_api_key,
embedding_model=config.openai_embedding_model,
)
# Build embedder config with optional custom base URL
embedder_config_kwargs = {
"api_key": config.openai_api_key,
"embedding_model": config.openai_embedding_model,
}
if config.openai_base_url:
embedder_config_kwargs["base_url"] = config.openai_base_url

embedder_config = OpenAIEmbedderConfig(**embedder_config_kwargs)

return OpenAIEmbedder(config=embedder_config)
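
The conditional-kwargs construction is deliberate: `base_url` is forwarded only when non-empty, so the OpenAI SDK's default endpoint stays in effect otherwise rather than receiving an empty string. The pattern in isolation, as a hypothetical helper:

```python
# Hypothetical helper distilling the pattern above: only forward base_url
# when it is actually set, so the SDK default applies otherwise.
def embedder_kwargs(api_key: str, embedding_model: str, base_url: str = "") -> dict:
    kwargs = {"api_key": api_key, "embedding_model": embedding_model}
    if base_url:  # empty string means "use the provider's default endpoint"
        kwargs["base_url"] = base_url
    return kwargs

print(embedder_kwargs("sk-test", "text-embedding-3-small"))
print(embedder_kwargs("sk-test", "text-embedding-3-small",
                      "https://api.z.ai/api/coding/paas/v4"))
```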
3 changes: 3 additions & 0 deletions apps/backend/integrations/graphiti/providers_pkg/factory.py
@@ -27,6 +27,7 @@
create_ollama_llm_client,
create_openai_llm_client,
create_openrouter_llm_client,
create_zai_llm_client,
)

logger = logging.getLogger(__name__)
@@ -62,6 +63,8 @@ def create_llm_client(config: "GraphitiConfig") -> Any:
return create_google_llm_client(config)
elif provider == "openrouter":
return create_openrouter_llm_client(config)
elif provider == "zai":
return create_zai_llm_client(config)
else:
raise ProviderError(f"Unknown LLM provider: {provider}")

@@ -16,6 +16,7 @@
from .ollama_llm import create_ollama_llm_client
from .openai_llm import create_openai_llm_client
from .openrouter_llm import create_openrouter_llm_client
from .zai_llm import create_zai_llm_client

__all__ = [
"create_openai_llm_client",
@@ -24,4 +25,5 @@
"create_ollama_llm_client",
"create_google_llm_client",
"create_openrouter_llm_client",
"create_zai_llm_client",
]
@@ -40,10 +40,15 @@ def create_openai_llm_client(config: "GraphitiConfig") -> Any:
f"Error: {e}"
)

llm_config = LLMConfig(
api_key=config.openai_api_key,
model=config.openai_model,
)
# Build LLM config with optional custom base URL
llm_config_kwargs = {
"api_key": config.openai_api_key,
"model": config.openai_model,
}
if config.openai_base_url:
llm_config_kwargs["base_url"] = config.openai_base_url

llm_config = LLMConfig(**llm_config_kwargs)

# GPT-5 family and o1/o3 models support reasoning/verbosity params
model_lower = config.openai_model.lower()
@@ -0,0 +1,56 @@
"""
Z.AI LLM Provider
=================

Z.AI LLM client implementation for Graphiti.
Reuses OpenAI client since Z.AI provides an OpenAI-compatible API.
"""

from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
from graphiti_config import GraphitiConfig

from ..exceptions import ProviderError, ProviderNotInstalled


def create_zai_llm_client(config: "GraphitiConfig") -> Any:
"""
Create Z.AI LLM client (using OpenAI client).

Args:
config: GraphitiConfig with Z.AI settings

Returns:
OpenAI LLM client instance configured for Z.AI

Raises:
ProviderNotInstalled: If graphiti-core is not installed
ProviderError: If API key is missing
"""
if not config.zai_api_key:
raise ProviderError("Z.AI provider requires ZAI_API_KEY")

if not config.zai_base_url:
raise ProviderError("Z.AI provider requires ZAI_BASE_URL")

try:
from graphiti_core.llm_client.config import LLMConfig
from graphiti_core.llm_client.openai_client import OpenAIClient
except ImportError as e:
raise ProviderNotInstalled(
f"Z.AI provider requires graphiti-core. "
f"Install with: pip install graphiti-core\n"
f"Error: {e}"
)

# Configure as specialized OpenAI client
llm_config = LLMConfig(
api_key=config.zai_api_key,
model=config.zai_model,
base_url=config.zai_base_url,
)

# Z.AI uses its own parameter names (e.g., 'thinking') and doesn't support
# OpenAI's 'reasoning' or 'verbosity' parameters - disable them for compatibility
return OpenAIClient(config=llm_config, reasoning=None, verbosity=None)
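
An end-to-end sketch of exercising the new provider through the factory, assuming `graphiti-core` is installed; the import paths are assumptions based on the file paths in this PR:

```python
# Assumed import paths - adjust to the actual package layout.
import os
from integrations.graphiti.config import GraphitiConfig
from integrations.graphiti.providers_pkg.factory import create_llm_client

os.environ["GRAPHITI_LLM_PROVIDER"] = "zai"
os.environ["ZAI_API_KEY"] = "zai-xxxxxxxx"  # placeholder key

config = GraphitiConfig.from_env()
# Dispatches to create_zai_llm_client(), which returns graphiti-core's
# OpenAIClient pointed at the Z.AI base URL, with reasoning/verbosity
# disabled for compatibility.
client = create_llm_client(config)
```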