From 83f28f154570437f80fe3791ac36ee9d52b97813 Mon Sep 17 00:00:00 2001
From: precious112
Date: Wed, 11 Mar 2026 08:31:52 +0100
Subject: [PATCH] fixed issue with message truncation

---
 .../agent/src/argus_agent/agent/memory.py | 26 ++++++++++++++-----
 1 file changed, 20 insertions(+), 6 deletions(-)

diff --git a/packages/agent/src/argus_agent/agent/memory.py b/packages/agent/src/argus_agent/agent/memory.py
index 50bfc8c..df19e0a 100644
--- a/packages/agent/src/argus_agent/agent/memory.py
+++ b/packages/agent/src/argus_agent/agent/memory.py
@@ -189,12 +189,26 @@ def _truncate_history(self, messages: list[LLMMessage]) -> list[LLMMessage]:
             dropped = result.pop(0)
             total_tokens -= _estimate_tokens(dropped)
 
-        # Drop orphaned tool results whose assistant message was truncated above.
-        # Gemini requires function_response to immediately follow a function_call.
-        while result and result[0].role == "tool":
-            result.pop(0)
-
-        return result
+        # Remove orphaned tool results whose assistant message was
+        # truncated above. A tool result is valid only if its
+        # tool_call_id appears in a preceding assistant message's
+        # tool_calls list.
+        valid_call_ids: set[str] = set()
+        cleaned: list[LLMMessage] = []
+        for msg in result:
+            if msg.role == "assistant" and msg.tool_calls:
+                for tc in msg.tool_calls:
+                    tc_id = tc.get("id", "")
+                    if tc_id:
+                        valid_call_ids.add(tc_id)
+                cleaned.append(msg)
+            elif msg.role == "tool":
+                if msg.tool_call_id in valid_call_ids:
+                    cleaned.append(msg)
+            else:
+                cleaned.append(msg)
+
+        return cleaned
 
 
 def _estimate_tokens(msg: LLMMessage) -> int: