AutoGPT: Deprecate MessageHistory

Pwuts committed Sep 20, 2023
1 parent 6b22abd commit 0ca003d
Showing 7 changed files with 12 additions and 133 deletions.
38 changes: 1 addition & 37 deletions autogpts/autogpt/autogpt/agents/agent.py
@@ -18,12 +18,11 @@
 from autogpt.llm.utils import count_string_tokens
 from autogpt.logs.log_cycle import (
     CURRENT_CONTEXT_FILE_NAME,
-    FULL_MESSAGE_HISTORY_FILE_NAME,
     NEXT_ACTION_FILE_NAME,
     USER_INPUT_FILE_NAME,
     LogCycleHandler,
 )
-from autogpt.models.agent_actions import (
+from autogpt.models.action_history import (
     Action,
     ActionErrorResult,
     ActionInterruptedByHuman,
@@ -113,22 +112,12 @@ def construct_base_prompt(self, *args, **kwargs) -> ChatSequence:
             kwargs["append_messages"] = []
         kwargs["append_messages"].append(budget_msg)
 
-        # # Include message history in base prompt
-        # kwargs["with_message_history"] = True
-
         return super().construct_base_prompt(*args, **kwargs)
 
     def on_before_think(self, *args, **kwargs) -> ChatSequence:
         prompt = super().on_before_think(*args, **kwargs)
 
         self.log_cycle_handler.log_count_within_cycle = 0
-        self.log_cycle_handler.log_cycle(
-            self.ai_config.ai_name,
-            self.created_at,
-            self.cycle_count,
-            self.message_history.raw(),
-            FULL_MESSAGE_HISTORY_FILE_NAME,
-        )
         self.log_cycle_handler.log_cycle(
             self.ai_config.ai_name,
             self.created_at,
@@ -148,11 +137,6 @@ def execute(
 
         if command_name == "human_feedback":
             result = ActionInterruptedByHuman(feedback=user_input)
-            self.message_history.add(
-                "user",
-                "I interrupted the execution of the command you proposed "
-                f"to give you some feedback: {user_input}",
-            )
             self.log_cycle_handler.log_cycle(
                 self.ai_config.ai_name,
                 self.created_at,
@@ -207,26 +191,6 @@ def execute(
             elif result.status == "error":
                 result.reason = plugin.post_command(command_name, result.reason)
 
-        # Check if there's a result from the command append it to the message
-        if result.status == "success":
-            self.message_history.add(
-                "system",
-                f"Command {command_name} returned: {result.outputs}",
-                "action_result",
-            )
-        elif result.status == "error":
-            message = f"Command {command_name} failed: {result.reason}"
-
-            # Append hint to the error message if the exception has a hint
-            if (
-                result.error
-                and isinstance(result.error, AgentException)
-                and result.error.hint
-            ):
-                message = message.rstrip(".") + f". {result.error.hint}"
-
-            self.message_history.add("system", message, "action_result")
-
         # Update action history
         self.event_history.register_result(result)
 
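Net effect in agent.py: command outcomes are no longer mirrored into a free-form message history; the structured event_history is the single record, via event_history.register_result(result). Below is a minimal runnable sketch of that pattern. The dataclasses are simplified stand-ins for the real autogpt.models.action_history types, not the actual implementation; only the register_result call mirrors the diff above.

    # Illustrative sketch only: minimal stand-ins for the action_history
    # models, showing the pattern this commit converges on. Command outcomes
    # are recorded once, in the structured event history, rather than also
    # being appended to a free-form message history.
    from dataclasses import dataclass, field
    from typing import Union


    @dataclass
    class ActionSuccessResult:
        outputs: str
        status: str = "success"


    @dataclass
    class ActionErrorResult:
        reason: str
        status: str = "error"


    ActionResult = Union[ActionSuccessResult, ActionErrorResult]


    @dataclass
    class EpisodicActionHistory:
        episodes: list = field(default_factory=list)

        def register_result(self, result: ActionResult) -> None:
            # The real class attaches the result to the current episode's
            # action; a plain append keeps this sketch short.
            self.episodes.append(result)


    event_history = EpisodicActionHistory()
    event_history.register_result(ActionSuccessResult(outputs="42"))
    event_history.register_result(ActionErrorResult(reason="command failed"))
    print([e.status for e in event_history.episodes])  # ['success', 'error']
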
72 changes: 4 additions & 68 deletions autogpts/autogpt/autogpt/agents/base.py
@@ -15,8 +15,7 @@
 from autogpt.llm.base import ChatSequence, Message
 from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS, get_openai_command_specs
 from autogpt.llm.utils import count_message_tokens, create_chat_completion
-from autogpt.memory.message_history import MessageHistory
-from autogpt.models.agent_actions import EpisodicActionHistory, ActionResult
+from autogpt.models.action_history import EpisodicActionHistory, ActionResult
 from autogpt.prompts.generator import PromptGenerator
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
 
@@ -92,11 +91,6 @@ def __init__(
 
         self.event_history = EpisodicActionHistory()
 
-        self.message_history = MessageHistory(
-            model=self.llm,
-            max_summary_tlength=summary_max_tlength or self.send_token_limit // 6,
-        )
-
         # Support multi-inheritance and mixins for subclasses
         super(BaseAgent, self).__init__()
 
@@ -168,7 +162,6 @@ def construct_base_prompt(
         prepend_messages: list[Message] = [],
         append_messages: list[Message] = [],
         reserve_tokens: int = 0,
-        with_message_history: bool = False,
     ) -> ChatSequence:
         """Constructs and returns a prompt with the following structure:
         1. System prompt
@@ -196,24 +189,6 @@
             [Message("system", self.system_prompt)] + prepend_messages,
         )
 
-        if with_message_history:
-            # Reserve tokens for messages to be appended later, if any
-            reserve_tokens += self.message_history.max_summary_tlength
-            if append_messages:
-                reserve_tokens += count_message_tokens(append_messages, self.llm.name)
-
-            # Fill message history, up to a margin of reserved_tokens.
-            # Trim remaining historical messages and add them to the running summary.
-            history_start_index = len(prompt)
-            trimmed_history = add_history_upto_token_limit(
-                prompt, self.message_history, self.send_token_limit - reserve_tokens
-            )
-            if trimmed_history:
-                new_summary_msg, _ = self.message_history.trim_messages(
-                    list(prompt), self.config
-                )
-                prompt.insert(history_start_index, new_summary_msg)
-
         if append_messages:
             prompt.extend(append_messages)
 
@@ -372,24 +347,9 @@ def on_response(
             The parsed command name and command args, if any, and the agent thoughts.
         """
 
-        # Save assistant reply to message history
-        self.message_history.append(prompt[-1])
-        self.message_history.add(
-            "assistant", llm_response.content, "ai_response"
-        )  # FIXME: support function calls
-
-        try:
-            return self.parse_and_process_response(
-                llm_response, thought_process_id, prompt, instruction
-            )
-        except InvalidAgentResponseError as e:
-            # TODO: tune this message
-            self.message_history.add(
-                "system",
-                f"Your response could not be parsed: {e}"
-                "\n\nRemember to only respond using the specified format above!",
-            )
-            raise
+        return self.parse_and_process_response(
+            llm_response, thought_process_id, prompt, instruction
+        )
 
     # TODO: update memory/context
 
@@ -415,27 +375,3 @@ def parse_and_process_response(
             The parsed command name and command args, if any, and the agent thoughts.
         """
         pass
-
-
-def add_history_upto_token_limit(
-    prompt: ChatSequence, history: MessageHistory, t_limit: int
-) -> list[Message]:
-    current_prompt_length = prompt.token_length
-    insertion_index = len(prompt)
-    limit_reached = False
-    trimmed_messages: list[Message] = []
-    for cycle in reversed(list(history.per_cycle())):
-        messages_to_add = [msg for msg in cycle if msg is not None]
-        tokens_to_add = count_message_tokens(messages_to_add, prompt.model.name)
-        if current_prompt_length + tokens_to_add > t_limit:
-            limit_reached = True
-
-        if not limit_reached:
-            # Add the most recent message to the start of the chain,
-            # after the system prompts.
-            prompt.insert(insertion_index, *messages_to_add)
-            current_prompt_length += tokens_to_add
-        else:
-            trimmed_messages = messages_to_add + trimmed_messages
-
-    return trimmed_messages
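The deleted add_history_upto_token_limit is the heart of what base.py loses: fit as many recent message cycles as possible under a token budget, newest first, and hand everything older back for summarization. Below is a self-contained sketch of that loop, with ChatSequence replaced by a plain list of strings and token counting approximated by a word count (both are stand-ins); the control flow matches the deleted function.

    # Self-contained sketch of the retired history-fitting loop.
    def add_history_upto_token_limit(
        prompt: list[str], history: list[list[str]], t_limit: int
    ) -> list[str]:
        current_prompt_length = sum(len(msg.split()) for msg in prompt)
        insertion_index = len(prompt)
        limit_reached = False
        trimmed_messages: list[str] = []
        for cycle in reversed(history):  # walk cycles newest-first
            tokens_to_add = sum(len(msg.split()) for msg in cycle)
            if current_prompt_length + tokens_to_add > t_limit:
                limit_reached = True

            if not limit_reached:
                # Insert the most recent cycle right after the system prompts.
                prompt[insertion_index:insertion_index] = cycle
                current_prompt_length += tokens_to_add
            else:
                # Everything older than the cutoff is handed back to the
                # caller (originally: folded into the running summary).
                trimmed_messages = cycle + trimmed_messages

        return trimmed_messages


    prompt = ["system: you are an agent"]
    history = [["cycle one result"], ["cycle two result"], ["cycle three result"]]
    print(add_history_upto_token_limit(prompt, history, t_limit=10))
    print(prompt)  # keeps the newest cycle, trims the two older ones
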
2 changes: 1 addition & 1 deletion autogpts/autogpt/autogpt/agents/features/watchdog.py
@@ -3,7 +3,7 @@
 import logging
 from contextlib import ExitStack
 
-from autogpt.models.agent_actions import EpisodicActionHistory
+from autogpt.models.action_history import EpisodicActionHistory
 
 from ..base import BaseAgent
 
30 changes: 5 additions & 25 deletions autogpts/autogpt/autogpt/agents/planning_agent.py
@@ -21,9 +21,8 @@
     USER_INPUT_FILE_NAME,
     LogCycleHandler,
 )
-from autogpt.models.agent_actions import (
+from autogpt.models.action_history import (
     ActionErrorResult,
-    EpisodicActionHistory,
     ActionInterruptedByHuman,
     ActionResult,
     ActionSuccessResult,
@@ -69,8 +68,6 @@ def __init__(
         self.log_cycle_handler = LogCycleHandler()
         """LogCycleHandler for structured debug logging."""
 
-        self.action_history = EpisodicActionHistory()
-
         self.plan: list[str] = []
         """List of steps that the Agent plans to take"""
 
@@ -90,12 +87,12 @@ def construct_base_prompt(
             plan_section += [f"{i}. {s}" for i, s in enumerate(self.plan, 1)]
 
         # Add the actions so far to the prompt
-        if self.action_history:
+        if self.event_history:
             plan_section += [
                 "\n### Progress",
                 "So far, you have executed the following actions based on the plan:",
             ]
-            for i, cycle in enumerate(self.action_history, 1):
+            for i, cycle in enumerate(self.event_history, 1):
                 if not (cycle.action and cycle.result):
                     logger.warn(f"Incomplete action in history: {cycle}")
                     continue
@@ -229,7 +226,7 @@ def on_before_think(self, *args, **kwargs) -> ChatSequence:
             self.ai_config.ai_name,
             self.created_at,
             self.cycle_count,
-            self.action_history.episodes,
+            self.event_history.episodes,
             "action_history.json",
         )
         self.log_cycle_handler.log_cycle(
@@ -285,7 +282,7 @@ def execute(
 
         result_tlength = count_string_tokens(str(result), self.llm.name)
         memory_tlength = count_string_tokens(
-            str(self.message_history.summary_message()), self.llm.name
+            str(self.event_history.fmt_paragraph()), self.llm.name
         )
         if result_tlength + memory_tlength > self.send_token_limit:
             result = ActionErrorResult(
@@ -301,23 +298,6 @@
             elif result.status == "error":
                 result.reason = plugin.post_command(command_name, result.reason)
 
-        # Check if there's a result from the command append it to the message
-        if result.status == "success":
-            self.message_history.add(
-                "system",
-                f"Command {command_name} returned: {result.outputs}",
-                "action_result",
-            )
-        elif result.status == "error":
-            message = f"Command {command_name} failed: {result.reason}"
-            if (
-                result.error
-                and isinstance(result.error, AgentException)
-                and result.error.hint
-            ):
-                message = message.rstrip(".") + f". {result.error.hint}"
-            self.message_history.add("system", message, "action_result")
-
         return result
 
     def parse_and_process_response(
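In planning_agent.py the prompt's progress section and the token check now both read from event_history: its episodes drive the "### Progress" list, and fmt_paragraph() (per the diff) replaces the old running summary. Below is a rough sketch of rendering such a progress section; the Episode shape here is an assumption for illustration, not the real class, and only the iteration pattern mirrors the diff above.

    # Rough sketch of rendering a "### Progress" section from an episodic
    # action history, as the planning agent now does.
    from dataclasses import dataclass
    from typing import Optional


    @dataclass
    class Episode:
        action: Optional[str] = None
        result: Optional[str] = None


    def format_progress(episodes: list) -> str:
        lines = [
            "\n### Progress",
            "So far, you have executed the following actions based on the plan:",
        ]
        for i, cycle in enumerate(episodes, 1):
            if not (cycle.action and cycle.result):
                # Incomplete episodes are skipped (the real code logs a warning).
                continue
            lines.append(f"{i}. {cycle.action} -> {cycle.result}")
        return "\n".join(lines)


    episodes = [
        Episode(action="read_file('plan.md')", result="file contents"),
        Episode(action="web_search('weather')"),  # incomplete: no result yet
    ]
    print(format_progress(episodes))
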
1 change: 0 additions & 1 deletion autogpts/autogpt/autogpt/logs/log_cycle.py
@@ -6,7 +6,6 @@
 from .config import LOG_DIR
 
 DEFAULT_PREFIX = "agent"
-FULL_MESSAGE_HISTORY_FILE_NAME = "full_message_history.json"
 CURRENT_CONTEXT_FILE_NAME = "current_context.json"
 NEXT_ACTION_FILE_NAME = "next_action.json"
 PROMPT_SUMMARY_FILE_NAME = "prompt_summary.json"
2 changes: 1 addition & 1 deletion autogpts/autogpt/tests/unit/test_agent.py
@@ -3,7 +3,7 @@
 
 def test_agent_initialization(agent: Agent):
     assert agent.ai_config.ai_name == "Base"
-    assert agent.message_history.messages == []
+    assert agent.event_history.episodes == []
     assert agent.cycle_budget is None
     assert "You are Base" in agent.system_prompt
 
