From 5940ba1e4b7ccfdd7eeade3decf1fad77f7362cd Mon Sep 17 00:00:00 2001 From: AndyMik90 Date: Wed, 21 Jan 2026 13:38:31 +0100 Subject: [PATCH 1/7] fix(ci): remove broken symlinks before creating node_modules link The previous fix only removed partial directories but not broken symlinks. CI logs showed that an existing broken symlink pointing to "../../../../../../apps/frontend/node_modules" was skipped, causing electron-builder to fail with ENOENT during macOS code signing. Changes: - Check if existing symlink points to correct target (../../node_modules) - Remove incorrect or broken symlinks before creating new one - Add strict verification that symlink exists AND resolves correctly - Fail fast with clear error if verification fails Co-Authored-By: Claude Opus 4.5 --- .../actions/setup-node-frontend/action.yml | 46 +++++++++++-------- 1 file changed, 28 insertions(+), 18 deletions(-) diff --git a/.github/actions/setup-node-frontend/action.yml b/.github/actions/setup-node-frontend/action.yml index e92e8515f8..79d5c92996 100644 --- a/.github/actions/setup-node-frontend/action.yml +++ b/.github/actions/setup-node-frontend/action.yml @@ -65,13 +65,25 @@ runs: exit 1 fi - # Remove any existing node_modules in apps/frontend (npm workspaces may create partial one) - if [ -d "apps/frontend/node_modules" ] && [ ! -L "apps/frontend/node_modules" ]; then - echo "Removing partial node_modules created by npm workspaces..." - rm -rf "apps/frontend/node_modules" + # Remove any existing node_modules in apps/frontend + # This handles: partial directories from npm workspaces, AND broken symlinks + if [ -e "apps/frontend/node_modules" ] || [ -L "apps/frontend/node_modules" ]; then + # Check if it's a valid symlink pointing to root node_modules + if [ -L "apps/frontend/node_modules" ]; then + target=$(readlink apps/frontend/node_modules 2>/dev/null || echo "") + if [ "$target" = "../../node_modules" ] && [ -d "apps/frontend/node_modules" ]; then + echo "Correct symlink already exists: apps/frontend/node_modules -> ../../node_modules" + else + echo "Removing incorrect/broken symlink (was: $target)..." + rm -f "apps/frontend/node_modules" + fi + else + echo "Removing partial node_modules directory created by npm workspaces..." + rm -rf "apps/frontend/node_modules" + fi fi - # Create link if it doesn't exist + # Create link if it doesn't exist or was removed if [ ! -L "apps/frontend/node_modules" ]; then if [ "$RUNNER_OS" == "Windows" ]; then # Use directory junction on Windows (works without admin privileges) @@ -91,19 +103,17 @@ runs: exit 1 fi fi - else - echo "apps/frontend/node_modules symlink already exists" fi - # Verify the link works - if [ -L "apps/frontend/node_modules" ]; then - target=$(readlink apps/frontend/node_modules 2>/dev/null || echo "junction") - echo "Verified: apps/frontend/node_modules -> $target" - # Check that the target resolves correctly - if [ -d "apps/frontend/node_modules" ]; then - count=$(ls apps/frontend/node_modules 2>/dev/null | wc -l) - echo "Link resolves to directory with $count entries" - else - echo "::warning::Link exists but does not resolve to a valid directory" - fi + # Final verification - the symlink must exist and resolve correctly + if [ ! -L "apps/frontend/node_modules" ]; then + echo "::error::apps/frontend/node_modules symlink was not created" + exit 1 + fi + if [ ! 
-d "apps/frontend/node_modules" ]; then + echo "::error::apps/frontend/node_modules symlink does not resolve to a valid directory" + ls -la apps/frontend/ || true + exit 1 fi + count=$(ls apps/frontend/node_modules 2>/dev/null | wc -l) + echo "Verified: apps/frontend/node_modules -> ../../node_modules ($count entries)" From a97cf13cf99101d98463e6eb1aa942ce8c5d4921 Mon Sep 17 00:00:00 2001 From: AndyMik90 Date: Wed, 21 Jan 2026 13:44:30 +0100 Subject: [PATCH 2/7] fix(ci): handle Windows junction verification correctly Windows junctions don't appear as symlinks to bash's -L test, causing the verification step to fail even when the junction was created successfully. Changes: - Skip symlink check (-L) on Windows since junctions are different - Verify link works by checking electron package is accessible - Add more diagnostic output on failure Co-Authored-By: Claude Opus 4.5 --- .github/actions/setup-node-frontend/action.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/actions/setup-node-frontend/action.yml b/.github/actions/setup-node-frontend/action.yml index 79d5c92996..af77a4d1e7 100644 --- a/.github/actions/setup-node-frontend/action.yml +++ b/.github/actions/setup-node-frontend/action.yml @@ -105,15 +105,19 @@ runs: fi fi - # Final verification - the symlink must exist and resolve correctly - if [ ! -L "apps/frontend/node_modules" ]; then + # Final verification - the link must exist and resolve correctly + # Note: On Windows, junctions don't show as symlinks (-L), so we check if the directory exists + # and can be listed. On Unix, we also verify it's a symlink. + if [ "$RUNNER_OS" != "Windows" ] && [ ! -L "apps/frontend/node_modules" ]; then echo "::error::apps/frontend/node_modules symlink was not created" exit 1 fi - if [ ! -d "apps/frontend/node_modules" ]; then - echo "::error::apps/frontend/node_modules symlink does not resolve to a valid directory" + # Verify the link resolves to a valid directory with content + if ! 
ls apps/frontend/node_modules/electron >/dev/null 2>&1; then + echo "::error::apps/frontend/node_modules does not resolve correctly (electron not found)" ls -la apps/frontend/ || true + ls apps/frontend/node_modules 2>&1 | head -5 || true exit 1 fi count=$(ls apps/frontend/node_modules 2>/dev/null | wc -l) - echo "Verified: apps/frontend/node_modules -> ../../node_modules ($count entries)" + echo "Verified: apps/frontend/node_modules resolves correctly ($count entries)" From cf0a0af95f804068911873be2e5bb4c74f86b8a3 Mon Sep 17 00:00:00 2001 From: Mateusz Ruszkowski Date: Tue, 20 Jan 2026 03:24:14 +0100 Subject: [PATCH 3/7] Add Z.AI provider support to Graphiti and improve env loading --- apps/backend/.env.example | 30 ++++++++++ apps/backend/cli/utils.py | 8 +++ apps/backend/integrations/graphiti/config.py | 23 ++++++++ .../graphiti/providers_pkg/factory.py | 3 + .../providers_pkg/llm_providers/__init__.py | 2 + .../providers_pkg/llm_providers/zai_llm.py | 56 +++++++++++++++++++ 6 files changed, 122 insertions(+) create mode 100644 apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py diff --git a/apps/backend/.env.example b/apps/backend/.env.example index 06eeb7771d..d542839684 100644 --- a/apps/backend/.env.example +++ b/apps/backend/.env.example @@ -201,6 +201,9 @@ GRAPHITI_ENABLED=true # OpenAI API Key # OPENAI_API_KEY=sk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx +# Custom OpenAI Base URL (OPTIONAL - for LocalAI or other compatible APIs) +# OPENAI_BASE_URL= + # OpenAI Model for LLM (default: gpt-4o-mini) # OPENAI_MODEL=gpt-4o-mini @@ -276,6 +279,24 @@ GRAPHITI_ENABLED=true # OpenRouter Embedding Model (default: openai/text-embedding-3-small) # OPENROUTER_EMBEDDING_MODEL=openai/text-embedding-3-small +# ============================================================================= +# GRAPHITI: Z.AI Provider +# ============================================================================= +# Use Z.AI code generation models (GLM-4 based). +# Get API key from: https://www.z.ai/ +# +# Required: ZAI_API_KEY +# Note: Z.AI uses an OpenAI-compatible API structure. 
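+#
+# Because the API is OpenAI-compatible, connectivity can be sanity-checked
+# with a plain chat-completions request. This is only a sketch: the standard
+# /chat/completions route is assumed here, as with other OpenAI-compatible
+# endpoints.
+#   curl -s "$ZAI_BASE_URL/chat/completions" \
+#     -H "Authorization: Bearer $ZAI_API_KEY" \
+#     -H "Content-Type: application/json" \
+#     -d '{"model": "GLM-4.7", "messages": [{"role": "user", "content": "ping"}]}'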
+
+# Z.AI API Key
+# ZAI_API_KEY=your_key_here
+
+# Z.AI Base URL (default: https://api.z.ai/api/coding/paas/v4)
+# ZAI_BASE_URL=https://api.z.ai/api/coding/paas/v4
+
+# Z.AI Model (default: GLM-4.7)
+# ZAI_MODEL=GLM-4.7
+
 # =============================================================================
 # GRAPHITI: Azure OpenAI Provider
 # =============================================================================
@@ -333,6 +354,15 @@ GRAPHITI_ENABLED=true
 # GRAPHITI_EMBEDDER_PROVIDER=openai
 # OPENAI_API_KEY=sk-xxxxxxxx
 #
+# --- Example 1b: Z.AI + Voyage (OpenAI-compatible) ---
+# GRAPHITI_ENABLED=true
+# GRAPHITI_LLM_PROVIDER=openai
+# GRAPHITI_EMBEDDER_PROVIDER=voyage
+# OPENAI_API_KEY=your-z-ai-key
+# OPENAI_BASE_URL=https://api.z.ai/api/coding/paas/v4
+# OPENAI_MODEL=GLM-4.7
+# VOYAGE_API_KEY=pa-xxxxxxxx
+#
 # --- Example 2: Anthropic + Voyage (high quality) ---
 # GRAPHITI_ENABLED=true
 # GRAPHITI_LLM_PROVIDER=anthropic
diff --git a/apps/backend/cli/utils.py b/apps/backend/cli/utils.py
index f65b83c78f..a685e842b2 100644
--- a/apps/backend/cli/utils.py
+++ b/apps/backend/cli/utils.py
@@ -86,8 +86,16 @@ def setup_environment() -> Path:
     sys.path.insert(0, str(script_dir))
 
     # Load .env file - check both auto-claude/ and dev/auto-claude/ locations
+    # Also consider a project-local .env in the current working directory
+    cwd_env_file = Path.cwd() / ".env"
     env_file = script_dir / ".env"
     dev_env_file = script_dir.parent / "dev" / "auto-claude" / ".env"
+
+    # Load from CWD first (allows project-specific overrides)
+    if cwd_env_file.exists() and cwd_env_file.resolve() != env_file.resolve():
+        load_dotenv(cwd_env_file)
+
+    # Then load base config (fills in missing values)
     if env_file.exists():
         load_dotenv(env_file)
     elif dev_env_file.exists():
diff --git a/apps/backend/integrations/graphiti/config.py b/apps/backend/integrations/graphiti/config.py
index 45016f9669..f2360d55d1 100644
--- a/apps/backend/integrations/graphiti/config.py
+++ b/apps/backend/integrations/graphiti/config.py
@@ -90,6 +90,7 @@ class LLMProvider(str, Enum):
     OLLAMA = "ollama"
     GOOGLE = "google"
     OPENROUTER = "openrouter"
+    ZAI = "zai"
 
 
 class EmbedderProvider(str, Enum):
@@ -121,6 +122,7 @@ class GraphitiConfig:
 
     # OpenAI settings
    openai_api_key: str = ""
+    openai_base_url: str = ""  # Custom base URL (e.g., for Z.AI, OpenRouter, etc.)
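+    # Example value: https://api.z.ai/api/coding/paas/v4 (any OpenAI-compatible endpoint)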
openai_model: str = "gpt-5-mini" openai_embedding_model: str = "text-embedding-3-small" @@ -149,6 +151,11 @@ class GraphitiConfig: openrouter_llm_model: str = "anthropic/claude-sonnet-4" openrouter_embedding_model: str = "openai/text-embedding-3-small" + # Z.AI settings (OpenAI-compatible) + zai_api_key: str = "" + zai_base_url: str = "https://api.z.ai/api/coding/paas/v4" + zai_model: str = "GLM-4.7" + # Ollama settings (local) ollama_base_url: str = DEFAULT_OLLAMA_BASE_URL ollama_llm_model: str = "" @@ -174,6 +181,7 @@ def from_env(cls) -> "GraphitiConfig": # OpenAI settings openai_api_key = os.environ.get("OPENAI_API_KEY", "") + openai_base_url = os.environ.get("OPENAI_BASE_URL", "") openai_model = os.environ.get("OPENAI_MODEL", "gpt-5-mini") openai_embedding_model = os.environ.get( "OPENAI_EMBEDDING_MODEL", "text-embedding-3-small" @@ -216,6 +224,13 @@ def from_env(cls) -> "GraphitiConfig": "OPENROUTER_EMBEDDING_MODEL", "openai/text-embedding-3-small" ) + # Z.AI settings + zai_api_key = os.environ.get("ZAI_API_KEY", "") + zai_base_url = os.environ.get( + "ZAI_BASE_URL", "https://api.z.ai/api/coding/paas/v4" + ) + zai_model = os.environ.get("ZAI_MODEL", "GLM-4.7") + # Ollama settings ollama_base_url = os.environ.get("OLLAMA_BASE_URL", DEFAULT_OLLAMA_BASE_URL) ollama_llm_model = os.environ.get("OLLAMA_LLM_MODEL", "") @@ -234,6 +249,7 @@ def from_env(cls) -> "GraphitiConfig": database=database, db_path=db_path, openai_api_key=openai_api_key, + openai_base_url=openai_base_url, openai_model=openai_model, openai_embedding_model=openai_embedding_model, anthropic_api_key=anthropic_api_key, @@ -251,6 +267,9 @@ def from_env(cls) -> "GraphitiConfig": openrouter_base_url=openrouter_base_url, openrouter_llm_model=openrouter_llm_model, openrouter_embedding_model=openrouter_embedding_model, + zai_api_key=zai_api_key, + zai_base_url=zai_base_url, + zai_model=zai_model, ollama_base_url=ollama_base_url, ollama_llm_model=ollama_llm_model, ollama_embedding_model=ollama_embedding_model, @@ -685,6 +704,10 @@ def get_available_providers() -> dict: available_llm.append("openrouter") available_embedder.append("openrouter") + # Check Z.AI + if config.zai_api_key: + available_llm.append("zai") + # Check Ollama if config.ollama_llm_model: available_llm.append("ollama") diff --git a/apps/backend/integrations/graphiti/providers_pkg/factory.py b/apps/backend/integrations/graphiti/providers_pkg/factory.py index 06eb2b667c..b9f9d85697 100644 --- a/apps/backend/integrations/graphiti/providers_pkg/factory.py +++ b/apps/backend/integrations/graphiti/providers_pkg/factory.py @@ -27,6 +27,7 @@ create_ollama_llm_client, create_openai_llm_client, create_openrouter_llm_client, + create_zai_llm_client, ) logger = logging.getLogger(__name__) @@ -62,6 +63,8 @@ def create_llm_client(config: "GraphitiConfig") -> Any: return create_google_llm_client(config) elif provider == "openrouter": return create_openrouter_llm_client(config) + elif provider == "zai": + return create_zai_llm_client(config) else: raise ProviderError(f"Unknown LLM provider: {provider}") diff --git a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/__init__.py b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/__init__.py index be335f5fb0..706a4f8f0a 100644 --- a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/__init__.py +++ b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/__init__.py @@ -16,6 +16,7 @@ from .ollama_llm import create_ollama_llm_client from .openai_llm import create_openai_llm_client from 
.openrouter_llm import create_openrouter_llm_client +from .zai_llm import create_zai_llm_client __all__ = [ "create_openai_llm_client", @@ -24,4 +25,5 @@ "create_ollama_llm_client", "create_google_llm_client", "create_openrouter_llm_client", + "create_zai_llm_client", ] diff --git a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py new file mode 100644 index 0000000000..32b477a611 --- /dev/null +++ b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py @@ -0,0 +1,56 @@ +""" +Z.AI LLM Provider +================= + +Z.AI LLM client implementation for Graphiti. +Reuses OpenAI client since Z.AI provides an OpenAI-compatible API. +""" + +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from graphiti_config import GraphitiConfig + +from ..exceptions import ProviderError, ProviderNotInstalled + + +def create_zai_llm_client(config: "GraphitiConfig") -> Any: + """ + Create Z.AI LLM client (using OpenAI client). + + Args: + config: GraphitiConfig with Z.AI settings + + Returns: + OpenAI LLM client instance configured for Z.AI + + Raises: + ProviderNotInstalled: If graphiti-core is not installed + ProviderError: If API key is missing + """ + if not config.zai_api_key: + raise ProviderError("Z.AI provider requires ZAI_API_KEY") + + if not config.zai_base_url: + raise ProviderError("Z.AI provider requires ZAI_BASE_URL") + + try: + from graphiti_core.llm_client.config import LLMConfig + from graphiti_core.llm_client.openai_client import OpenAIClient + except ImportError as e: + raise ProviderNotInstalled( + f"Z.AI provider requires graphiti-core. " + f"Install with: pip install graphiti-core\n" + f"Error: {e}" + ) + + # Configure as specialized OpenAI client + llm_config = LLMConfig( + api_key=config.zai_api_key, + model=config.zai_model, + base_url=config.zai_base_url, + ) + + # Determine if model supports reasoning (GLM models usually do) + # Z.AI primarily serves GLM models (GLM-4.7, etc.) 
+ return OpenAIClient(config=llm_config) From a187949f2b46210e9d0e31b2fe469c782899670d Mon Sep 17 00:00:00 2001 From: Mateusz Ruszkowski Date: Tue, 20 Jan 2026 23:07:15 +0100 Subject: [PATCH 4/7] fix: address review comments for Z.AI provider support - Fix get_available_providers to check both zai_api_key AND zai_base_url - Implement reasoning detection for Z.AI LLM client (GLM-4 models support reasoning) - Pass openai_base_url to OpenAI LLM client for custom endpoint support - Pass openai_base_url to OpenAI embedder client for custom endpoint support - Clarify .env.example with two Z.AI integration approaches: - Via OpenAI provider with custom base URL (Example 1b) - Via dedicated zai provider (Example 1c) Addresses review comments from: - Gemini Code Assist (config.py, zai_llm.py) - CodeRabbit (.env.example, zai_llm.py) - Sentry (HIGH severity bug: openai_base_url not passed to clients) Co-Authored-By: Claude Opus 4.5 --- apps/backend/.env.example | 15 ++++++++++++++- apps/backend/integrations/graphiti/config.py | 4 ++-- .../embedder_providers/openai_embedder.py | 13 +++++++++---- .../providers_pkg/llm_providers/openai_llm.py | 13 +++++++++---- .../providers_pkg/llm_providers/zai_llm.py | 12 ++++++++++-- 5 files changed, 44 insertions(+), 13 deletions(-) diff --git a/apps/backend/.env.example b/apps/backend/.env.example index d542839684..87c6dad145 100644 --- a/apps/backend/.env.example +++ b/apps/backend/.env.example @@ -354,7 +354,9 @@ GRAPHITI_ENABLED=true # GRAPHITI_EMBEDDER_PROVIDER=openai # OPENAI_API_KEY=sk-xxxxxxxx # -# --- Example 1b: Z.AI + Voyage (OpenAI-compatible) --- +# --- Example 1b: Z.AI via OpenAI provider (custom base URL) --- +# Use this approach when you want to use Z.AI models through the OpenAI provider. +# This is useful if you want to use Z.AI for LLM and a different embedder (like Voyage). # GRAPHITI_ENABLED=true # GRAPHITI_LLM_PROVIDER=openai # GRAPHITI_EMBEDDER_PROVIDER=voyage @@ -363,6 +365,17 @@ GRAPHITI_ENABLED=true # OPENAI_MODEL=GLM-4.7 # VOYAGE_API_KEY=pa-xxxxxxxx # +# --- Example 1c: Z.AI via dedicated zai provider --- +# Use this approach for native Z.AI handling with dedicated configuration. +# This keeps Z.AI credentials separate from OpenAI credentials. 
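+# Both routes build the same OpenAI-compatible client under the hood; only
+# the env vars differ. To check that the zai provider is detected once the
+# key is set, something like this works (import path assumed from the repo
+# layout, not guaranteed):
+#   python -c "from integrations.graphiti.config import get_available_providers; print(get_available_providers())"
+#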
+# GRAPHITI_ENABLED=true +# GRAPHITI_LLM_PROVIDER=zai +# GRAPHITI_EMBEDDER_PROVIDER=voyage +# ZAI_API_KEY=your-z-ai-key +# ZAI_BASE_URL=https://api.z.ai/api/coding/paas/v4 +# ZAI_MODEL=GLM-4.7 +# VOYAGE_API_KEY=pa-xxxxxxxx +# # --- Example 2: Anthropic + Voyage (high quality) --- # GRAPHITI_ENABLED=true # GRAPHITI_LLM_PROVIDER=anthropic diff --git a/apps/backend/integrations/graphiti/config.py b/apps/backend/integrations/graphiti/config.py index f2360d55d1..faf7468d0d 100644 --- a/apps/backend/integrations/graphiti/config.py +++ b/apps/backend/integrations/graphiti/config.py @@ -704,8 +704,8 @@ def get_available_providers() -> dict: available_llm.append("openrouter") available_embedder.append("openrouter") - # Check Z.AI - if config.zai_api_key: + # Check Z.AI (requires both API key and base URL) + if config.zai_api_key and config.zai_base_url: available_llm.append("zai") # Check Ollama diff --git a/apps/backend/integrations/graphiti/providers_pkg/embedder_providers/openai_embedder.py b/apps/backend/integrations/graphiti/providers_pkg/embedder_providers/openai_embedder.py index a2561180dd..e8fd3f64c5 100644 --- a/apps/backend/integrations/graphiti/providers_pkg/embedder_providers/openai_embedder.py +++ b/apps/backend/integrations/graphiti/providers_pkg/embedder_providers/openai_embedder.py @@ -39,9 +39,14 @@ def create_openai_embedder(config: "GraphitiConfig") -> Any: if not config.openai_api_key: raise ProviderError("OpenAI embedder requires OPENAI_API_KEY") - embedder_config = OpenAIEmbedderConfig( - api_key=config.openai_api_key, - embedding_model=config.openai_embedding_model, - ) + # Build embedder config with optional custom base URL + embedder_config_kwargs = { + "api_key": config.openai_api_key, + "embedding_model": config.openai_embedding_model, + } + if config.openai_base_url: + embedder_config_kwargs["base_url"] = config.openai_base_url + + embedder_config = OpenAIEmbedderConfig(**embedder_config_kwargs) return OpenAIEmbedder(config=embedder_config) diff --git a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/openai_llm.py b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/openai_llm.py index 0d6567fc41..08c1c574ed 100644 --- a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/openai_llm.py +++ b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/openai_llm.py @@ -40,10 +40,15 @@ def create_openai_llm_client(config: "GraphitiConfig") -> Any: f"Error: {e}" ) - llm_config = LLMConfig( - api_key=config.openai_api_key, - model=config.openai_model, - ) + # Build LLM config with optional custom base URL + llm_config_kwargs = { + "api_key": config.openai_api_key, + "model": config.openai_model, + } + if config.openai_base_url: + llm_config_kwargs["base_url"] = config.openai_base_url + + llm_config = LLMConfig(**llm_config_kwargs) # GPT-5 family and o1/o3 models support reasoning/verbosity params model_lower = config.openai_model.lower() diff --git a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py index 32b477a611..1f7bc2afe6 100644 --- a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py +++ b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py @@ -51,6 +51,14 @@ def create_zai_llm_client(config: "GraphitiConfig") -> Any: base_url=config.zai_base_url, ) - # Determine if model supports reasoning (GLM models usually do) + # Determine if model supports reasoning (GLM-4 models usually do) # 
Z.AI primarily serves GLM models (GLM-4.7, etc.) - return OpenAIClient(config=llm_config) + model_lower = config.zai_model.lower() + supports_reasoning = model_lower.startswith("glm-4") + + if supports_reasoning: + # Use defaults for models that support reasoning params + return OpenAIClient(config=llm_config) + else: + # Disable reasoning/verbosity for models that don't support them + return OpenAIClient(config=llm_config, reasoning=None, verbosity=None) From ce81ded8c61f0132bc64b72e1a4e2b924e7f5949 Mon Sep 17 00:00:00 2001 From: Mateusz Ruszkowski Date: Wed, 21 Jan 2026 12:02:14 +0100 Subject: [PATCH 5/7] fix: always disable reasoning/verbosity for Z.AI provider Z.AI uses its own parameter names (e.g., 'thinking') and doesn't support OpenAI's 'reasoning' or 'verbosity' parameters. Always disable them for compatibility, similar to how OpenRouter provider handles this. Addresses: Sentry review comment about incompatible parameters Co-Authored-By: Claude Opus 4.5 --- .../providers_pkg/llm_providers/zai_llm.py | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py index 1f7bc2afe6..16f93b05e6 100644 --- a/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py +++ b/apps/backend/integrations/graphiti/providers_pkg/llm_providers/zai_llm.py @@ -51,14 +51,6 @@ def create_zai_llm_client(config: "GraphitiConfig") -> Any: base_url=config.zai_base_url, ) - # Determine if model supports reasoning (GLM-4 models usually do) - # Z.AI primarily serves GLM models (GLM-4.7, etc.) - model_lower = config.zai_model.lower() - supports_reasoning = model_lower.startswith("glm-4") - - if supports_reasoning: - # Use defaults for models that support reasoning params - return OpenAIClient(config=llm_config) - else: - # Disable reasoning/verbosity for models that don't support them - return OpenAIClient(config=llm_config, reasoning=None, verbosity=None) + # Z.AI uses its own parameter names (e.g., 'thinking') and doesn't support + # OpenAI's 'reasoning' or 'verbosity' parameters - disable them for compatibility + return OpenAIClient(config=llm_config, reasoning=None, verbosity=None) From 34a39937039a310548e2eba013ea0540ccba7ab1 Mon Sep 17 00:00:00 2001 From: Mateusz Ruszkowski Date: Wed, 21 Jan 2026 12:41:08 +0100 Subject: [PATCH 6/7] style(env): use consistent placeholder format for ZAI_API_KEY Changed placeholder from 'your_key_here' to 'zai-xxxxxxxx...' to match the convention used by other API keys in the file. Co-Authored-By: Claude Opus 4.5 --- apps/backend/.env.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/backend/.env.example b/apps/backend/.env.example index 87c6dad145..ba4b7707e1 100644 --- a/apps/backend/.env.example +++ b/apps/backend/.env.example @@ -289,7 +289,7 @@ GRAPHITI_ENABLED=true # Note: Z.AI uses an OpenAI-compatible API structure. 
# Z.AI API Key -# ZAI_API_KEY=your_key_here +# ZAI_API_KEY=zai-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx # Z.AI Base URL (default: https://api.z.ai/api/coding/paas/v4) # ZAI_BASE_URL=https://api.z.ai/api/coding/paas/v4 From edc67259d4893e801b87844e80151c38c3c8fc11 Mon Sep 17 00:00:00 2001 From: Mateusz Ruszkowski Date: Wed, 21 Jan 2026 15:19:18 +0100 Subject: [PATCH 7/7] docs(env): add zai to LLM provider selection list --- apps/backend/.env.example | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/backend/.env.example b/apps/backend/.env.example index ba4b7707e1..d0b5dd3739 100644 --- a/apps/backend/.env.example +++ b/apps/backend/.env.example @@ -186,7 +186,7 @@ GRAPHITI_ENABLED=true # Choose which providers to use for LLM and embeddings. # Default is "openai" for both. -# LLM provider: openai | anthropic | azure_openai | ollama | google | openrouter +# LLM provider: openai | anthropic | azure_openai | ollama | google | openrouter | zai # GRAPHITI_LLM_PROVIDER=openai # Embedder provider: openai | voyage | azure_openai | ollama | google | openrouter
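
For reference, a minimal sketch of exercising the new provider end to end.
Import paths are assumed from the file layout in the diffs, and the env var
names are taken from .env.example; treat this as illustration rather than
code shipped by the series:

    import os

    # Select the dedicated zai provider and configure it (placeholder key)
    os.environ["GRAPHITI_LLM_PROVIDER"] = "zai"
    os.environ["ZAI_API_KEY"] = "zai-xxxxxxxx"
    os.environ["ZAI_MODEL"] = "GLM-4.7"  # default documented in .env.example

    from integrations.graphiti.config import GraphitiConfig
    from integrations.graphiti.providers_pkg.factory import create_llm_client

    config = GraphitiConfig.from_env()
    # Dispatches to create_zai_llm_client, which returns graphiti-core's
    # OpenAIClient pointed at zai_base_url, with reasoning/verbosity disabled
    client = create_llm_client(config)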