From 0ca003d858f8faa11ad13aefd102ba05496f939b Mon Sep 17 00:00:00 2001
From: Reinier van der Leer
Date: Wed, 20 Sep 2023 02:40:35 +0200
Subject: [PATCH] AutoGPT: Deprecate MessageHistory

---
 autogpts/autogpt/autogpt/agents/agent.py      | 38 +---------
 autogpts/autogpt/autogpt/agents/base.py       | 72 ++-----------------
 .../autogpt/agents/features/watchdog.py       |  2 +-
 .../autogpt/autogpt/agents/planning_agent.py  | 30 ++------
 autogpts/autogpt/autogpt/logs/log_cycle.py    |  1 -
 .../{agent_actions.py => action_history.py}   |  0
 autogpts/autogpt/tests/unit/test_agent.py     |  2 +-
 7 files changed, 12 insertions(+), 133 deletions(-)
 rename autogpts/autogpt/autogpt/models/{agent_actions.py => action_history.py} (100%)

diff --git a/autogpts/autogpt/autogpt/agents/agent.py b/autogpts/autogpt/autogpt/agents/agent.py
index 245097a0f610..187a996cc947 100644
--- a/autogpts/autogpt/autogpt/agents/agent.py
+++ b/autogpts/autogpt/autogpt/agents/agent.py
@@ -18,12 +18,11 @@
 from autogpt.llm.utils import count_string_tokens
 from autogpt.logs.log_cycle import (
     CURRENT_CONTEXT_FILE_NAME,
-    FULL_MESSAGE_HISTORY_FILE_NAME,
     NEXT_ACTION_FILE_NAME,
     USER_INPUT_FILE_NAME,
     LogCycleHandler,
 )
-from autogpt.models.agent_actions import (
+from autogpt.models.action_history import (
     Action,
     ActionErrorResult,
     ActionInterruptedByHuman,
@@ -113,22 +112,12 @@ def construct_base_prompt(self, *args, **kwargs) -> ChatSequence:
                 kwargs["append_messages"] = []
             kwargs["append_messages"].append(budget_msg)
 
-        # # Include message history in base prompt
-        # kwargs["with_message_history"] = True
-
         return super().construct_base_prompt(*args, **kwargs)
 
     def on_before_think(self, *args, **kwargs) -> ChatSequence:
         prompt = super().on_before_think(*args, **kwargs)
 
         self.log_cycle_handler.log_count_within_cycle = 0
-        self.log_cycle_handler.log_cycle(
-            self.ai_config.ai_name,
-            self.created_at,
-            self.cycle_count,
-            self.message_history.raw(),
-            FULL_MESSAGE_HISTORY_FILE_NAME,
-        )
         self.log_cycle_handler.log_cycle(
             self.ai_config.ai_name,
             self.created_at,
@@ -148,11 +137,6 @@ def execute(
 
         if command_name == "human_feedback":
             result = ActionInterruptedByHuman(feedback=user_input)
-            self.message_history.add(
-                "user",
-                "I interrupted the execution of the command you proposed "
-                f"to give you some feedback: {user_input}",
-            )
             self.log_cycle_handler.log_cycle(
                 self.ai_config.ai_name,
                 self.created_at,
@@ -207,26 +191,6 @@ def execute(
             elif result.status == "error":
                 result.reason = plugin.post_command(command_name, result.reason)
 
-        # Check if there's a result from the command append it to the message
-        if result.status == "success":
-            self.message_history.add(
-                "system",
-                f"Command {command_name} returned: {result.outputs}",
-                "action_result",
-            )
-        elif result.status == "error":
-            message = f"Command {command_name} failed: {result.reason}"
-
-            # Append hint to the error message if the exception has a hint
-            if (
-                result.error
-                and isinstance(result.error, AgentException)
-                and result.error.hint
-            ):
-                message = message.rstrip(".") + f". {result.error.hint}"
-
-            self.message_history.add("system", message, "action_result")
-
         # Update action history
         self.event_history.register_result(result)
 
diff --git a/autogpts/autogpt/autogpt/agents/base.py b/autogpts/autogpt/autogpt/agents/base.py
index 51ba1be11269..780331c492fc 100644
--- a/autogpts/autogpt/autogpt/agents/base.py
+++ b/autogpts/autogpt/autogpt/agents/base.py
@@ -15,8 +15,7 @@
 from autogpt.llm.base import ChatSequence, Message
 from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
 from autogpt.llm.utils import count_message_tokens, create_chat_completion
-from autogpt.memory.message_history import MessageHistory
-from autogpt.models.agent_actions import EpisodicActionHistory, ActionResult
+from autogpt.models.action_history import EpisodicActionHistory, ActionResult
 from autogpt.prompts.generator import PromptGenerator
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
 
@@ -92,11 +91,6 @@ def __init__(
 
         self.event_history = EpisodicActionHistory()
 
-        self.message_history = MessageHistory(
-            model=self.llm,
-            max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
-        )
-
         # Support multi-inheritance and mixins for subclasses
         super(BaseAgent, self).__init__()
 
@@ -168,7 +162,6 @@ def construct_base_prompt(
         prepend_messages: list[Message] = [],
         append_messages: list[Message] = [],
         reserve_tokens: int = 0,
-        with_message_history: bool = False,
     ) -> ChatSequence:
         """Constructs and returns a prompt with the following structure:
         1. System prompt
@@ -196,24 +189,6 @@ def construct_base_prompt(
             [Message("system", self.system_prompt)] + prepend_messages,
         )
 
-        if with_message_history:
-            # Reserve tokens for messages to be appended later, if any
-            reserve_tokens += self.message_history.max_summary_tlength
-            if append_messages:
-                reserve_tokens += count_message_tokens(append_messages, self.llm.name)
-
-            # Fill message history, up to a margin of reserved_tokens.
-            # Trim remaining historical messages and add them to the running summary.
-            history_start_index = len(prompt)
-            trimmed_history = add_history_upto_token_limit(
-                prompt, self.message_history, self.send_token_limit - reserve_tokens
-            )
-            if trimmed_history:
-                new_summary_msg, _ = self.message_history.trim_messages(
-                    list(prompt), self.config
-                )
-                prompt.insert(history_start_index, new_summary_msg)
-
         if append_messages:
             prompt.extend(append_messages)
 
@@ -372,24 +347,9 @@ def on_response(
             The parsed command name and command args, if any, and the agent thoughts.
         """
 
-        # Save assistant reply to message history
-        self.message_history.append(prompt[-1])
-        self.message_history.add(
-            "assistant", llm_response.content, "ai_response"
-        )  # FIXME: support function calls
-
-        try:
-            return self.parse_and_process_response(
-                llm_response, thought_process_id, prompt, instruction
-            )
-        except InvalidAgentResponseError as e:
-            # TODO: tune this message
-            self.message_history.add(
-                "system",
-                f"Your response could not be parsed: {e}"
-                "\n\nRemember to only respond using the specified format above!",
-            )
-            raise
+        return self.parse_and_process_response(
+            llm_response, thought_process_id, prompt, instruction
+        )
 
         # TODO: update memory/context
 
@@ -415,27 +375,3 @@ def parse_and_process_response(
             The parsed command name and command args, if any, and the agent thoughts.
""" pass - - -def add_history_upto_token_limit( - prompt: ChatSequence, history: MessageHistory, t_limit: int -) -> list[Message]: - current_prompt_length = prompt.token_length - insertion_index = len(prompt) - limit_reached = False - trimmed_messages: list[Message] = [] - for cycle in reversed(list(history.per_cycle())): - messages_to_add = [msg for msg in cycle if msg is not None] - tokens_to_add = count_message_tokens(messages_to_add, prompt.model.name) - if current_prompt_length + tokens_to_add > t_limit: - limit_reached = True - - if not limit_reached: - # Add the most recent message to the start of the chain, - # after the system prompts. - prompt.insert(insertion_index, *messages_to_add) - current_prompt_length += tokens_to_add - else: - trimmed_messages = messages_to_add + trimmed_messages - - return trimmed_messages diff --git a/autogpts/autogpt/autogpt/agents/features/watchdog.py b/autogpts/autogpt/autogpt/agents/features/watchdog.py index 7c43ef90599d..e7310e2a0d62 100644 --- a/autogpts/autogpt/autogpt/agents/features/watchdog.py +++ b/autogpts/autogpt/autogpt/agents/features/watchdog.py @@ -3,7 +3,7 @@ import logging from contextlib import ExitStack -from autogpt.models.agent_actions import EpisodicActionHistory +from autogpt.models.action_history import EpisodicActionHistory from ..base import BaseAgent diff --git a/autogpts/autogpt/autogpt/agents/planning_agent.py b/autogpts/autogpt/autogpt/agents/planning_agent.py index 46e6615b214f..5e36e0798c9f 100644 --- a/autogpts/autogpt/autogpt/agents/planning_agent.py +++ b/autogpts/autogpt/autogpt/agents/planning_agent.py @@ -21,9 +21,8 @@ USER_INPUT_FILE_NAME, LogCycleHandler, ) -from autogpt.models.agent_actions import ( +from autogpt.models.action_history import ( ActionErrorResult, - EpisodicActionHistory, ActionInterruptedByHuman, ActionResult, ActionSuccessResult, @@ -69,8 +68,6 @@ def __init__( self.log_cycle_handler = LogCycleHandler() """LogCycleHandler for structured debug logging.""" - self.action_history = EpisodicActionHistory() - self.plan: list[str] = [] """List of steps that the Agent plans to take""" @@ -90,12 +87,12 @@ def construct_base_prompt( plan_section += [f"{i}. 
{s}" for i, s in enumerate(self.plan, 1)] # Add the actions so far to the prompt - if self.action_history: + if self.event_history: plan_section += [ "\n### Progress", "So far, you have executed the following actions based on the plan:", ] - for i, cycle in enumerate(self.action_history, 1): + for i, cycle in enumerate(self.event_history, 1): if not (cycle.action and cycle.result): logger.warn(f"Incomplete action in history: {cycle}") continue @@ -229,7 +226,7 @@ def on_before_think(self, *args, **kwargs) -> ChatSequence: self.ai_config.ai_name, self.created_at, self.cycle_count, - self.action_history.episodes, + self.event_history.episodes, "action_history.json", ) self.log_cycle_handler.log_cycle( @@ -285,7 +282,7 @@ def execute( result_tlength = count_string_tokens(str(result), self.llm.name) memory_tlength = count_string_tokens( - str(self.message_history.summary_message()), self.llm.name + str(self.event_history.fmt_paragraph()), self.llm.name ) if result_tlength + memory_tlength > self.send_token_limit: result = ActionErrorResult( @@ -301,23 +298,6 @@ def execute( elif result.status == "error": result.reason = plugin.post_command(command_name, result.reason) - # Check if there's a result from the command append it to the message - if result.status == "success": - self.message_history.add( - "system", - f"Command {command_name} returned: {result.outputs}", - "action_result", - ) - elif result.status == "error": - message = f"Command {command_name} failed: {result.reason}" - if ( - result.error - and isinstance(result.error, AgentException) - and result.error.hint - ): - message = message.rstrip(".") + f". {result.error.hint}" - self.message_history.add("system", message, "action_result") - return result def parse_and_process_response( diff --git a/autogpts/autogpt/autogpt/logs/log_cycle.py b/autogpts/autogpt/autogpt/logs/log_cycle.py index bb821d3ce5c0..062455fcbc0a 100644 --- a/autogpts/autogpt/autogpt/logs/log_cycle.py +++ b/autogpts/autogpt/autogpt/logs/log_cycle.py @@ -6,7 +6,6 @@ from .config import LOG_DIR DEFAULT_PREFIX = "agent" -FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json" CURRENT_CONTEXT_FILE_NAME = "current_context.json" NEXT_ACTION_FILE_NAME = "next_action.json" PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json" diff --git a/autogpts/autogpt/autogpt/models/agent_actions.py b/autogpts/autogpt/autogpt/models/action_history.py similarity index 100% rename from autogpts/autogpt/autogpt/models/agent_actions.py rename to autogpts/autogpt/autogpt/models/action_history.py diff --git a/autogpts/autogpt/tests/unit/test_agent.py b/autogpts/autogpt/tests/unit/test_agent.py index 02b6e349afd1..967022e22c11 100644 --- a/autogpts/autogpt/tests/unit/test_agent.py +++ b/autogpts/autogpt/tests/unit/test_agent.py @@ -3,7 +3,7 @@ def test_agent_initialization(agent: Agent): assert agent.ai_config.ai_name == "Base" - assert agent.message_history.messages == [] + assert agent.event_history.episodes == [] assert agent.cycle_budget is None assert "You are Base" in agent.system_prompt