From ef75d7d29657fdba019faabd530055492278436b Mon Sep 17 00:00:00 2001
From: Rishabh <134101578+GitHoobar@users.noreply.github.com>
Date: Thu, 27 Nov 2025 14:23:48 +0530
Subject: [PATCH] fix(memory): batch message-history summarization to respect
 the fast LLM's token limit

update_running_summary() previously fed every trimmed event to the
summarizer in a single request, which overflows the model's context
window once enough history accumulates.

Accumulate events into token-counted batches instead: a batch is flushed
through the new summarize_batch() helper whenever adding the next event
would push past the fast model's max_tokens (minus a small reserve for
the prompt template), and the final partial batch is flushed at the end.

Also fixes a typo in the summarizer prompt ("the your latest actions").

Addresses the token-limit issue in Significant-Gravitas/AutoGPT.
---
 autogpt/memory/message_history.py | 35 ++++++++++++++++++++++++++-----
 1 file changed, 30 insertions(+), 5 deletions(-)

diff --git a/autogpt/memory/message_history.py b/autogpt/memory/message_history.py
index 22f96a4aaa6a..ed38fc5e1195 100644
--- a/autogpt/memory/message_history.py
+++ b/autogpt/memory/message_history.py
@@ -14,7 +14,8 @@
     is_string_valid_json,
 )
 from autogpt.llm.base import ChatSequence, Message, MessageRole, MessageType
-from autogpt.llm.utils import create_chat_completion
+from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
+from autogpt.llm.utils import count_string_tokens, create_chat_completion
 from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
 from autogpt.logs import logger
 
@@ -163,24 +164,50 @@ def update_running_summary(self, new_events: list[Message]) -> Message:
             elif event.role.lower() == "system":
                 event.role = "your computer"
 
             # Delete all user messages
             elif event.role == "user":
                 new_events.remove(event)
 
+        prompt_template_length = 100
+        max_tokens = OPEN_AI_CHAT_MODELS.get(cfg.fast_llm_model).max_tokens
+        batch = []
+        batch_tlength = 0
+
+        # Fold events into the summary in token-counted batches so a single
+        # request never exceeds the fast model's context window; 100 tokens
+        # are reserved for the prompt template itself.
+        for event in new_events:
+            event_tlength = count_string_tokens(str(event), cfg.fast_llm_model)
+
+            if batch_tlength + event_tlength > max_tokens - prompt_template_length:
+                self.summarize_batch(batch, cfg)
+                batch = [event]
+                batch_tlength = event_tlength
+            else:
+                batch.append(event)
+                batch_tlength += event_tlength
+
+        if batch:
+            self.summarize_batch(batch, cfg)
+
+        return self.summary_message()
+
+    def summarize_batch(self, new_events_batch, cfg):
+        """Summarize new_events_batch and merge the result into self.summary."""
         prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
 
-You will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
+You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
 
 Summary So Far:
 """
 {self.summary}
 """
 
 Latest Development:
 """
-{new_events or "Nothing new happened."}
+{new_events_batch or "Nothing new happened."}
 """
 '''
 
         prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
         self.agent.log_cycle_handler.log_cycle(
@@ -199,6 +226,4 @@ def update_running_summary(self, new_events: list[Message]) -> Message:
             self.agent.cycle_count,
             self.summary,
             SUMMARY_FILE_NAME,
         )
-
-        return self.summary_message()