Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -55,6 +55,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
tests and blocks all package builds (deb, rpm, arch, AppImage, snap,
Homebrew, Docker) if they fail.

### Added

- **XML tag stripping**: Strip internal XML tags from LLM responses to prevent
tag leakage in chat (thinking, reasoning, scratchpad, etc.)
- **Runtime model metadata**: Fetch model metadata from provider APIs for
accurate context window detection during auto-compaction
- **Run detail UI**: Panel showing tool calls and message flow for agent runs,
accessible via expandable button on assistant messages

### Fixed

- **Docker TLS setup**: All Docker examples now expose port 13132 for CA
Expand Down
1 change: 1 addition & 0 deletions crates/agents/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ pub use {
runner::AgentRunError,
};
pub mod provider_chain;
pub mod response_sanitizer;
pub mod silent_turn;
pub mod skills;
pub mod tool_registry;
75 changes: 67 additions & 8 deletions crates/agents/src/model.rs
Original file line number Diff line number Diff line change
Expand Up @@ -364,6 +364,18 @@ pub trait LlmProvider: Send + Sync {
) -> Pin<Box<dyn Stream<Item = StreamEvent> + Send + '_>> {
self.stream(messages)
}

/// Fetch runtime model metadata from the provider API.
///
/// By default this synthesizes a `ModelMetadata` from the statically
/// configured `context_window()` value. Providers that expose a `/models`
/// endpoint can override it to report the actual context length at runtime.
async fn model_metadata(&self) -> anyhow::Result<ModelMetadata> {
    // No network call here: reuse the provider's static configuration.
    let id = self.id().to_string();
    let context_length = self.context_window();
    Ok(ModelMetadata { id, context_length })
}
}

/// Response from an LLM completion call.
Expand All @@ -389,6 +401,13 @@ pub struct Usage {
pub cache_write_tokens: u32,
}

/// Runtime model metadata fetched from provider APIs.
#[derive(Debug, Clone)]
pub struct ModelMetadata {
    /// Model identifier; the default trait impl copies the provider's `id()`.
    pub id: String,
    /// Context length used for auto-compaction sizing; the default trait impl
    /// takes it from `context_window()` (presumably measured in tokens —
    /// confirm against provider API docs).
    pub context_length: u32,
}

#[allow(clippy::unwrap_used, clippy::expect_used)]
#[cfg(test)]
mod tests {
Expand Down Expand Up @@ -473,14 +492,11 @@ mod tests {

#[test]
fn to_openai_assistant_with_tools() {
let msg = ChatMessage::assistant_with_tools(
Some("thinking".into()),
vec![ToolCall {
id: "call_1".into(),
name: "exec".into(),
arguments: serde_json::json!({"cmd": "ls"}),
}],
);
let msg = ChatMessage::assistant_with_tools(Some("thinking".into()), vec![ToolCall {
id: "call_1".into(),
name: "exec".into(),
arguments: serde_json::json!({"cmd": "ls"}),
}]);
let val = msg.to_openai_value();
assert_eq!(val["role"], "assistant");
assert_eq!(val["content"], "thinking");
Expand Down Expand Up @@ -657,4 +673,47 @@ mod tests {
assert!(matches!(&msgs[0], ChatMessage::User { .. }));
assert!(matches!(&msgs[1], ChatMessage::Assistant { .. }));
}

// ── ModelMetadata default trait impl ────────────────────────────

/// Minimal provider to test default `model_metadata()` behavior.
///
/// Overrides only the required methods, so calling `model_metadata()` on it
/// exercises the trait's default implementation.
struct StubProvider;

#[async_trait::async_trait]
impl LlmProvider for StubProvider {
    fn name(&self) -> &str {
        "stub"
    }

    fn context_window(&self) -> u32 {
        42_000
    }

    fn id(&self) -> &str {
        "stub-model"
    }

    async fn complete(
        &self,
        _messages: &[ChatMessage],
        _tools: &[serde_json::Value],
    ) -> anyhow::Result<CompletionResponse> {
        // The metadata test never performs a completion, so this path is dead.
        Err(anyhow::anyhow!("not implemented"))
    }

    fn stream(
        &self,
        _messages: Vec<ChatMessage>,
    ) -> Pin<Box<dyn Stream<Item = StreamEvent> + Send + '_>> {
        // An empty stream is the smallest thing that satisfies the signature.
        Box::pin(tokio_stream::empty())
    }
}

#[tokio::test]
async fn default_model_metadata_returns_context_window() {
let provider = StubProvider;
let meta = provider.model_metadata().await.unwrap();
assert_eq!(meta.id, "stub-model");
assert_eq!(meta.context_length, 42_000);
}
}
13 changes: 5 additions & 8 deletions crates/agents/src/providers/github_copilot.rs
Original file line number Diff line number Diff line change
Expand Up @@ -235,14 +235,11 @@ async fn fetch_valid_copilot_token(
}

let copilot_resp: CopilotTokenResponse = resp.json().await?;
let _ = token_store.save(
"github-copilot-api",
&OAuthTokens {
access_token: Secret::new(copilot_resp.token.clone()),
refresh_token: None,
expires_at: Some(copilot_resp.expires_at),
},
);
let _ = token_store.save("github-copilot-api", &OAuthTokens {
access_token: Secret::new(copilot_resp.token.clone()),
refresh_token: None,
expires_at: Some(copilot_resp.expires_at),
});

Ok(copilot_resp.token)
}
Expand Down
Loading
Loading