Commit
Merge branch 'master' into upload-logs-as-artifacts
waynehamadi authored Jun 10, 2023
2 parents bd72aeb + 6b9e3b2 commit ba7bcf8
Showing 15 changed files with 400 additions and 65 deletions.
2 changes: 1 addition & 1 deletion .github/PULL_REQUEST_TEMPLATE.md
@@ -41,7 +41,7 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
      black .
      isort .
      mypy
-     autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests --in-place
+     autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
      ```

  <!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
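The flag added here (and in the CI workflow and pre-commit config below), `--ignore-pass-after-docstring`, makes autoflake keep a `pass` that directly follows a docstring while still removing other useless `pass` statements. A minimal sketch of the distinction, assuming autoflake's documented defaults:

```python
class PluginStub:
    """Intentionally empty; the pass below marks the body explicitly."""

    pass  # kept: directly follows a docstring, so the new flag spares it


def noop():
    x = 1
    pass  # still flagged: a redundant pass in a block with other statements
```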
2 changes: 1 addition & 1 deletion .github/workflows/ci.yml
@@ -69,7 +69,7 @@ jobs:

      - name: Check for unused imports and pass statements
        run: |
-         cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests"
+         cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
          $cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)

  test:
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -31,7 +31,7 @@ repos:

      hooks:
        - id: autoflake
          name: autoflake
-         entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests
+         entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
          language: python
          types: [ python ]
        - id: pytest-check
72 changes: 39 additions & 33 deletions autogpt/agent/agent.py
@@ -65,29 +65,31 @@ def __init__(
         memory: VectorMemory,
         next_action_count: int,
         command_registry: CommandRegistry,
-        config: AIConfig,
+        ai_config: AIConfig,
         system_prompt: str,
         triggering_prompt: str,
         workspace_directory: str,
+        config: Config,
     ):
-        cfg = Config()
         self.ai_name = ai_name
         self.memory = memory
         self.history = MessageHistory(self)
         self.next_action_count = next_action_count
         self.command_registry = command_registry
         self.config = config
+        self.ai_config = ai_config
         self.system_prompt = system_prompt
         self.triggering_prompt = triggering_prompt
-        self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)
+        self.workspace = Workspace(workspace_directory, config.restrict_to_workspace)
         self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
         self.cycle_count = 0
         self.log_cycle_handler = LogCycleHandler()
-        self.fast_token_limit = OPEN_AI_CHAT_MODELS.get(cfg.fast_llm_model).max_tokens
+        self.fast_token_limit = OPEN_AI_CHAT_MODELS.get(
+            config.fast_llm_model
+        ).max_tokens

     def start_interaction_loop(self):
         # Interaction Loop
-        cfg = Config()
         self.cycle_count = 0
         command_name = None
         arguments = None

@@ -112,34 +114,36 @@ def signal_handler(signum, frame):
             self.cycle_count += 1
             self.log_cycle_handler.log_count_within_cycle = 0
             self.log_cycle_handler.log_cycle(
-                self.config.ai_name,
+                self.ai_config.ai_name,
                 self.created_at,
                 self.cycle_count,
                 [m.raw() for m in self.history],
                 FULL_MESSAGE_HISTORY_FILE_NAME,
             )
             if (
-                cfg.continuous_mode
-                and cfg.continuous_limit > 0
-                and self.cycle_count > cfg.continuous_limit
+                self.config.continuous_mode
+                and self.config.continuous_limit > 0
+                and self.cycle_count > self.config.continuous_limit
             ):
                 logger.typewriter_log(
-                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
+                    "Continuous Limit Reached: ",
+                    Fore.YELLOW,
+                    f"{self.config.continuous_limit}",
                 )
                 break
             # Send message to AI, get response
-            with Spinner("Thinking... ", plain_output=cfg.plain_output):
+            with Spinner("Thinking... ", plain_output=self.config.plain_output):
                 assistant_reply = chat_with_ai(
-                    cfg,
+                    self.config,
                     self,
                     self.system_prompt,
                     self.triggering_prompt,
                     self.fast_token_limit,
-                    cfg.fast_llm_model,
+                    self.config.fast_llm_model,
                 )

             assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
-            for plugin in cfg.plugins:
+            for plugin in self.config.plugins:
                 if not plugin.can_handle_post_planning():
                     continue
                 assistant_reply_json = plugin.post_planning(assistant_reply_json)

@@ -150,18 +154,18 @@ def signal_handler(signum, frame):
             # Get command name and arguments
             try:
                 print_assistant_thoughts(
-                    self.ai_name, assistant_reply_json, cfg.speak_mode
+                    self.ai_name, assistant_reply_json, self.config.speak_mode
                 )
                 command_name, arguments = get_command(assistant_reply_json)
-                if cfg.speak_mode:
+                if self.config.speak_mode:
                     say_text(f"I want to execute {command_name}")

                 arguments = self._resolve_pathlike_command_args(arguments)

             except Exception as e:
                 logger.error("Error: \n", str(e))
             self.log_cycle_handler.log_cycle(
-                self.config.ai_name,
+                self.ai_config.ai_name,
                 self.created_at,
                 self.cycle_count,
                 assistant_reply_json,

@@ -177,7 +181,7 @@ def signal_handler(signum, frame):
                 f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
             )

-            if not cfg.continuous_mode and self.next_action_count == 0:
+            if not self.config.continuous_mode and self.next_action_count == 0:
                 # ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                 # Get key press: Prompt the user to press enter to continue or escape
                 # to exit

@@ -188,13 +192,13 @@ def signal_handler(signum, frame):
                     f"{self.ai_name}..."
                 )
                 while True:
-                    if cfg.chat_messages_enabled:
+                    if self.config.chat_messages_enabled:
                         console_input = clean_input("Waiting for your response...")
                     else:
                         console_input = clean_input(
                             Fore.MAGENTA + "Input:" + Style.RESET_ALL
                         )
-                    if console_input.lower().strip() == cfg.authorise_key:
+                    if console_input.lower().strip() == self.config.authorise_key:
                         user_input = "GENERATE NEXT COMMAND JSON"
                         break
                     elif console_input.lower().strip() == "s":

@@ -205,7 +209,7 @@ def signal_handler(signum, frame):
                         )
                         thoughts = assistant_reply_json.get("thoughts", {})
                         self_feedback_resp = self.get_self_feedback(
-                            thoughts, cfg.fast_llm_model
+                            thoughts, self.config.fast_llm_model
                         )
                         logger.typewriter_log(
                             f"SELF FEEDBACK: {self_feedback_resp}",

@@ -218,7 +222,9 @@ def signal_handler(signum, frame):
                     elif console_input.lower().strip() == "":
                         logger.warn("Invalid input format.")
                         continue
-                    elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
+                    elif console_input.lower().startswith(
+                        f"{self.config.authorise_key} -"
+                    ):
                         try:
                             self.next_action_count = abs(
                                 int(console_input.split(" ")[1])

@@ -231,14 +237,14 @@ def signal_handler(signum, frame):
                             )
                             continue
                         break
-                    elif console_input.lower() == cfg.exit_key:
+                    elif console_input.lower() == self.config.exit_key:
                         user_input = "EXIT"
                         break
                     else:
                         user_input = console_input
                         command_name = "human_feedback"
                         self.log_cycle_handler.log_cycle(
-                            self.config.ai_name,
+                            self.ai_config.ai_name,
                             self.created_at,
                             self.cycle_count,
                             user_input,

@@ -271,7 +277,7 @@ def signal_handler(signum, frame):
             elif command_name == "self_feedback":
                 result = f"Self feedback: {user_input}"
             else:
-                for plugin in cfg.plugins:
+                for plugin in self.config.plugins:
                     if not plugin.can_handle_pre_command():
                         continue
                     command_name, arguments = plugin.pre_command(

@@ -281,22 +287,22 @@ def signal_handler(signum, frame):
                     self.command_registry,
                     command_name,
                     arguments,
-                    self.config.prompt_generator,
-                    config=cfg,
+                    self.ai_config.prompt_generator,
+                    config=self.config,
                 )
                 result = f"Command {command_name} returned: " f"{command_result}"

             result_tlength = count_string_tokens(
-                str(command_result), cfg.fast_llm_model
+                str(command_result), self.config.fast_llm_model
             )
             memory_tlength = count_string_tokens(
-                str(self.history.summary_message()), cfg.fast_llm_model
+                str(self.history.summary_message()), self.config.fast_llm_model
             )
             if result_tlength + memory_tlength + 600 > self.fast_token_limit:
                 result = f"Failure: command {command_name} returned too much output. \
                     Do not execute this command again with the same arguments."

-            for plugin in cfg.plugins:
+            for plugin in self.config.plugins:
                 if not plugin.can_handle_post_command():
                     continue
                 result = plugin.post_command(command_name, result)

@@ -337,7 +343,7 @@ def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
         Returns:
             str: A feedback response generated using the provided thoughts dictionary.
         """
-        ai_role = self.config.ai_role
+        ai_role = self.ai_config.ai_role

         feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."
         reasoning = thoughts.get("reasoning", "")

@@ -349,7 +355,7 @@ def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
         prompt.add("user", feedback_prompt + feedback_thoughts)

         self.log_cycle_handler.log_cycle(
-            self.config.ai_name,
+            self.ai_config.ai_name,
             self.created_at,
             self.cycle_count,
             prompt.raw(),

@@ -359,7 +365,7 @@ def get_self_feedback(self, thoughts: dict, llm_model: str) -> str:
         feedback = create_chat_completion(prompt)

         self.log_cycle_handler.log_cycle(
-            self.config.ai_name,
+            self.ai_config.ai_name,
             self.created_at,
             self.cycle_count,
             feedback,
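Since `__init__` and `start_interaction_loop` now read everything from the injected `self.config` instead of constructing the `Config()` singleton locally, a caller (a test, for instance) can hand the agent a tailored configuration object. A sketch of that benefit, assuming `Config()` is directly constructible and its attributes are writable; the attribute names all appear in the hunks above, but this helper is hypothetical and not part of the commit:

```python
from autogpt.config import Config

def make_quiet_config() -> Config:
    # Hypothetical test helper: tweak the injected Config instead of globals.
    config = Config()
    config.plain_output = True      # disable the Spinner animation
    config.continuous_mode = True   # skip the per-command authorization prompt
    config.continuous_limit = 3     # stop the interaction loop after three cycles
    return config
```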
4 changes: 2 additions & 2 deletions autogpt/llm/chat.py
@@ -150,7 +150,7 @@ def chat_with_ai(
         if not plugin.can_handle_on_planning():
             continue
         plugin_response = plugin.on_planning(
-            agent.config.prompt_generator, message_sequence.raw()
+            agent.ai_config.prompt_generator, message_sequence.raw()
         )
         if not plugin_response or plugin_response == "":
             continue

@@ -181,7 +181,7 @@ def chat_with_ai(
     logger.debug("")
     logger.debug("----------- END OF CONTEXT ----------------")
     agent.log_cycle_handler.log_cycle(
-        agent.config.ai_name,
+        agent.ai_config.ai_name,
         agent.created_at,
         agent.cycle_count,
         message_sequence.raw(),
3 changes: 2 additions & 1 deletion autogpt/main.py
@@ -189,9 +189,10 @@ def run_auto_gpt(
         memory=memory,
         next_action_count=next_action_count,
         command_registry=command_registry,
-        config=ai_config,
         system_prompt=system_prompt,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
         workspace_directory=workspace_directory,
+        ai_config=ai_config,
+        config=cfg,
     )
     agent.start_interaction_loop()
4 changes: 2 additions & 2 deletions autogpt/memory/message_history.py
@@ -184,7 +184,7 @@ def update_running_summary(self, new_events: list[Message]) -> Message:

         prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
         self.agent.log_cycle_handler.log_cycle(
-            self.agent.config.ai_name,
+            self.agent.ai_config.ai_name,
             self.agent.created_at,
             self.agent.cycle_count,
             prompt.raw(),

@@ -194,7 +194,7 @@ def update_running_summary(self, new_events: list[Message]) -> Message:
         self.summary = create_chat_completion(prompt)

         self.agent.log_cycle_handler.log_cycle(
-            self.agent.config.ai_name,
+            self.agent.ai_config.ai_name,
             self.agent.created_at,
             self.agent.cycle_count,
             self.summary,
25 changes: 25 additions & 0 deletions autogpt/plugins.py
@@ -1,8 +1,10 @@
  """Handles loading of plugins."""

  import importlib.util
+ import inspect
  import json
  import os
+ import sys
  import zipfile
  from pathlib import Path
  from typing import List

@@ -217,6 +219,28 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
     logger.debug(f"Allowlisted Plugins: {cfg.plugins_allowlist}")
     logger.debug(f"Denylisted Plugins: {cfg.plugins_denylist}")

+    # Directory-based plugins
+    for plugin_path in [f.path for f in os.scandir(cfg.plugins_dir) if f.is_dir()]:
+        # Avoid going into __pycache__ or other hidden directories
+        if plugin_path.startswith("__"):
+            continue
+
+        plugin_module_path = plugin_path.split(os.path.sep)
+        plugin_module_name = plugin_module_path[-1]
+        qualified_module_name = ".".join(plugin_module_path)
+
+        __import__(qualified_module_name)
+        plugin = sys.modules[qualified_module_name]
+
+        for _, class_obj in inspect.getmembers(plugin):
+            if (
+                hasattr(class_obj, "_abc_impl")
+                and AutoGPTPluginTemplate in class_obj.__bases__
+                and denylist_allowlist_check(plugin_module_name, cfg)
+            ):
+                loaded_plugins.append(class_obj())
+
+    # Zip-based plugins
     for plugin in plugins_path_path.glob("*.zip"):
         if moduleList := inspect_zip_for_modules(str(plugin), debug):
             for module in moduleList:

@@ -236,6 +260,7 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
                     and denylist_allowlist_check(a_module.__name__, cfg)
                 ):
                     loaded_plugins.append(a_module())
+
     # OpenAI plugins
     if cfg.plugins_openai:
         manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
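In the new directory-based branch above, a plugin lives in a package directory under `cfg.plugins_dir`, and `scan_plugins` imports it by converting the filesystem path into a dotted module name before inspecting its members for `AutoGPTPluginTemplate` subclasses. A standalone sketch of just that path-to-module mapping, under an assumed `plugins/` layout (the real scan also instantiates matching classes and applies the allow/deny lists):

```python
import os

def discover_plugin_modules(plugins_dir: str = "plugins") -> list[str]:
    """Map plugin directories to importable dotted module names."""
    modules = []
    for entry in os.scandir(plugins_dir):
        # Skip plain files plus __pycache__ and similar dunder directories.
        if not entry.is_dir() or entry.name.startswith("__"):
            continue
        # "plugins/my_plugin" -> "plugins.my_plugin"; importable when the
        # parent of plugins_dir is on sys.path (an assumption about layout).
        modules.append(".".join(entry.path.split(os.path.sep)))
    return modules

# e.g. discover_plugin_modules() -> ["plugins.my_plugin"]
# for a hypothetical plugins/my_plugin/__init__.py
```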
10 changes: 10 additions & 0 deletions scripts/install_plugin_deps.py
@@ -2,6 +2,7 @@
  import subprocess
  import sys
  import zipfile
+ from glob import glob
  from pathlib import Path


@@ -16,6 +17,8 @@ def install_plugin_dependencies():
         None
     """
     plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins"))
+
+    # Install zip-based plugins
     for plugin in plugins_dir.glob("*.zip"):
         with zipfile.ZipFile(str(plugin), "r") as zfile:
             try:

@@ -30,6 +33,13 @@ def install_plugin_dependencies():
             except KeyError:
                 continue

+    # Install directory-based plugins
+    for requirements_file in glob(f"{plugins_dir}/*/requirements.txt"):
+        subprocess.check_call(
+            [sys.executable, "-m", "pip", "install", "-r", requirements_file],
+            stdout=subprocess.DEVNULL,
+        )
+

 if __name__ == "__main__":
     install_plugin_dependencies()
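The new loop amounts to one `pip install -r` per plugin directory that ships a `requirements.txt`. A quick standalone way to preview what the glob would match, for a hypothetical `plugins/my_plugin/requirements.txt`:

```python
from glob import glob

# Prints ['plugins/my_plugin/requirements.txt'] for the hypothetical layout;
# the loop above passes each match to `python -m pip install -r <file>`.
print(glob("plugins/*/requirements.txt"))
```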