From e12a4cfa0805cc8a835f624f1f48bd79f117f2f9 Mon Sep 17 00:00:00 2001
From: Dylan Shade
Date: Wed, 22 Oct 2025 23:39:15 -0400
Subject: [PATCH 1/3] Add minimal Z.AI support

- Add GLM-4.5-air as fastest default model
- Add GLM-4.6 with -z flag
- Auto-detect Z.AI endpoint via ZAI_BASE_URL env var
- Maintain full OpenRouter compatibility
- Zero breaking changes, preserves lightweight ethos
---
 ask | 19 +++++++++++++++----
 1 file changed, 15 insertions(+), 4 deletions(-)

diff --git a/ask b/ask
index 6ee2b3f..2ee920a 100755
--- a/ask
+++ b/ask
@@ -21,11 +21,13 @@ get_model() {
     k) echo "moonshotai/kimi-k2:nitro" ;;
     q) echo "qwen/qwen3-235b-a22b-2507:nitro" ;;
     o) echo "openai/gpt-5:nitro" ;;
+    a) echo "GLM-4.5-air" ;;
+    z) echo "GLM-4.6" ;;
   esac
 }
 
 # Default values
-MODEL="qwen/qwen3-235b-a22b-2507:nitro"
+MODEL="GLM-4.5-air"
 SYSTEM_PROMPT=""
 PROMPT=""
 STREAMING=false
@@ -60,8 +62,12 @@ Options:
   -s          Use anthropic/claude-sonnet-4.5
   -x          Use x-ai/grok-code-fast-1
   -k          Use moonshotai/kimi-k2
-  -q          Use qwen/qwen3-235b-a22b-2507 (default)
+  -q          Use qwen/qwen3-235b-a22b-2507
   -o          Use openai/gpt-5
+  -a          Use GLM-4.5-air (default)
+  -z          Use GLM-4.6
+  -a          Use GLM-4.5-air (default)
+  -z          Use GLM-4.6
   -m MODEL    Use custom model
   -r          Disable system prompt (raw model behavior)
   --stream    Enable streaming output
@@ -84,7 +90,7 @@ EOF
 while [ $# -gt 0 ]; do
   case "$1" in
     -h|--help) show_help ;;
-    -[cgskqxo])
+    -[cgskqxoaz])
       MODEL="$(get_model "${1:1}")"
       shift ;;
     -m)
@@ -145,7 +151,12 @@ JSON_PAYLOAD='{
   "stream": '$([ "$STREAMING" = true ] && echo true || echo false)"$PROVIDER_JSON"'
 }'
 
-API_URL="https://openrouter.ai/api/v1/chat/completions"
+# Use Z.AI if configured, otherwise OpenRouter
+if [ -n "${ZAI_BASE_URL:-}" ]; then
+  API_URL="${ZAI_BASE_URL%/}/chat/completions"
+else
+  API_URL="https://openrouter.ai/api/v1/chat/completions"
+fi
 
 # Add newline before answer
 echo

From 13eaaf083d968b03973e5b7a6b810c8e25fa5b8a Mon Sep 17 00:00:00 2001
From: Dylan Shade
Date: Wed, 22 Oct 2025 23:44:48 -0400
Subject: [PATCH 2/3] Fix duplicate help flags

Remove duplicate -a and -z flag descriptions in help text
---
 ask | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ask b/ask
index 2ee920a..1a9bb11 100755
--- a/ask
+++ b/ask
@@ -66,8 +66,6 @@ Options:
   -o          Use openai/gpt-5
   -a          Use GLM-4.5-air (default)
   -z          Use GLM-4.6
-  -a          Use GLM-4.5-air (default)
-  -z          Use GLM-4.6
   -m MODEL    Use custom model
   -r          Disable system prompt (raw model behavior)
   --stream    Enable streaming output

From 4288ad2cec8d66af96f192401ddcc09f0f361d2e Mon Sep 17 00:00:00 2001
From: Dylan Shade
Date: Wed, 22 Oct 2025 23:48:44 -0400
Subject: [PATCH 3/3] fix: preserve original default model

Keep qwen/qwen3-235b-a22b-2507:nitro as default to maintain backward
compatibility. Z.AI models available via -a and -z flags.
---
 ask | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/ask b/ask
index 1a9bb11..232bb71 100755
--- a/ask
+++ b/ask
@@ -27,7 +27,7 @@ get_model() {
 }
 
 # Default values
-MODEL="GLM-4.5-air"
+MODEL="qwen/qwen3-235b-a22b-2507:nitro"
 SYSTEM_PROMPT=""
 PROMPT=""
 STREAMING=false
@@ -62,9 +62,9 @@ Options:
   -s          Use anthropic/claude-sonnet-4.5
   -x          Use x-ai/grok-code-fast-1
   -k          Use moonshotai/kimi-k2
-  -q          Use qwen/qwen3-235b-a22b-2507
+  -q          Use qwen/qwen3-235b-a22b-2507 (default)
   -o          Use openai/gpt-5
-  -a          Use GLM-4.5-air (default)
+  -a          Use GLM-4.5-air
   -z          Use GLM-4.6
   -m MODEL    Use custom model
   -r          Disable system prompt (raw model behavior)
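
A minimal usage sketch of the behaviour after all three patches (not part of the
patch series itself; the endpoint URL below is a placeholder, and `ask` is
assumed to be on PATH):

    # Z.AI routing is keyed off ZAI_BASE_URL; the script strips a trailing
    # slash and appends /chat/completions to whatever base URL is set here.
    export ZAI_BASE_URL="https://example.invalid/api/v1"   # placeholder; use your provider's base URL
    ask -a "Summarize this build log"      # GLM-4.5-air via the Z.AI endpoint
    ask -z "Review this shell function"    # GLM-4.6 via the Z.AI endpoint

    # With ZAI_BASE_URL unset, requests go to OpenRouter as before, and the
    # default model remains qwen/qwen3-235b-a22b-2507:nitro.
    unset ZAI_BASE_URL
    ask "What does jq do?"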