* Pi's message.

* Fix most everything.

* Blacked

* Add Typing, Docstrings everywhere, organize the code a bit.

* Black

* fix import

* Update message, dedupe.

* Increase backoff time.

* bump up retries

Author: BillSchumacher
Date: 2023-04-15 07:56:23 -05:00
Committed by: GitHub
Parent: e986af5de0
Commit: 1073954fb7
64 changed files with 2254 additions and 1883 deletions

View File

@@ -376,6 +376,10 @@ IMAGE_PROVIDER=sd
HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN"
```
## Selenium
```
sudo Xvfb :10 -ac -screen 0 1024x768x24 &
DISPLAY=:10 your-client
```
## ⚠️ Limitations
This experiment aims to showcase the potential of GPT-4 but comes with some limitations:

View File

@@ -1,371 +1,20 @@
import argparse
import json
"""Main script for the autogpt package."""
import logging
import traceback
from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments
from colorama import Fore, Style
from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt import chat
from autogpt import commands as cmd
from autogpt import speak, utils
from autogpt.ai_config import AIConfig
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.logger import logger
from autogpt.memory import get_memory, get_supported_memory_backends
from autogpt.spinner import Spinner
from autogpt.prompt import construct_prompt
cfg = Config()
config = None
# Load environment variables from .env file
def check_openai_api_key():
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
if not cfg.openai_api_key:
print(
Fore.RED
+ "Please set your OpenAI API key in .env or as an environment variable."
)
print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1)
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
if cfg.speak_mode and cfg.debug_mode:
speak.say_text(
"I have received an invalid JSON response from the OpenAI API. "
"Trying to fix it now."
)
logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
try:
# Use regex to search for JSON objects
import regex
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
json_match = json_pattern.search(json_string)
if json_match:
# Extract the valid JSON object from the string
json_string = json_match.group(0)
logger.typewriter_log(
title="Apparently json was fixed.", title_color=Fore.GREEN
)
if cfg.speak_mode and cfg.debug_mode:
speak.say_text("Apparently json was fixed.")
else:
raise ValueError("No valid JSON object found")
except (json.JSONDecodeError, ValueError) as e:
if cfg.debug_mode:
logger.error("Error: Invalid JSON: %s\n", json_string)
if cfg.speak_mode:
speak.say_text("Didn't work. I will have to ignore this response then.")
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
json_string = {}
return json_string
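The recursive pattern above needs the third-party `regex` package (the standard-library `re` module cannot express `(?R)` recursion). A minimal sketch of how it pulls the outermost JSON object out of surrounding noise, assuming `regex` is installed:
```
import regex  # third-party "regex" package; supports (?R) recursive patterns

json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
reply = 'Sure, here it is: {"thoughts": {"text": "hi", "plan": []}} Let me know!'
match = json_pattern.search(reply)
if match:
    print(match.group(0))  # -> {"thoughts": {"text": "hi", "plan": []}}
```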
def print_assistant_thoughts(assistant_reply):
"""Prints the assistant's thoughts to the console"""
global ai_name
global cfg
try:
try:
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
except json.JSONDecodeError as e:
logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply
)
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
# Check if assistant_reply_json is a string and attempt to parse it into a
# JSON object
if isinstance(assistant_reply_json, str):
try:
assistant_reply_json = json.loads(assistant_reply_json)
except json.JSONDecodeError as e:
logger.error("Error: Invalid JSON\n", assistant_reply)
assistant_reply_json = (
attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply_json
)
)
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
assistant_thoughts = assistant_reply_json.get("thoughts", {})
assistant_thoughts_text = assistant_thoughts.get("text")
if assistant_thoughts:
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
assistant_thoughts_plan = assistant_thoughts.get("plan")
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
assistant_thoughts_speak = assistant_thoughts.get("speak")
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
)
logger.typewriter_log(
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
)
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split("\n")
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log(
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
)
# Speak the assistant's thoughts
if cfg.speak_mode and assistant_thoughts_speak:
speak.say_text(assistant_thoughts_speak)
return assistant_reply_json
except json.decoder.JSONDecodeError:
call_stack = traceback.format_exc()
logger.error("Error: Invalid JSON\n", assistant_reply)
logger.error("Traceback: \n", call_stack)
if cfg.speak_mode:
speak.say_text(
"I have received an invalid JSON response from the OpenAI API."
" I cannot ignore this response."
)
# All other errors, return "Error: + error message"
except Exception:
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)
def construct_prompt():
"""Construct the prompt for the AI to respond to"""
config: AIConfig = AIConfig.load(cfg.ai_settings_file)
if cfg.skip_reprompt and config.ai_name:
logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
elif config.ai_name:
logger.typewriter_log(
"Welcome back! ",
Fore.GREEN,
f"Would you like me to return to being {config.ai_name}?",
speak_text=True,
)
should_continue = utils.clean_input(
f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
Continue (y/n): """
)
if should_continue.lower() == "n":
config = AIConfig()
if not config.ai_name:
config = prompt_user()
config.save()
# Get rid of this global:
global ai_name
ai_name = config.ai_name
return config.construct_full_prompt()
def prompt_user():
"""Prompt the user for input"""
ai_name = ""
# Construct the prompt
logger.typewriter_log(
"Welcome to Auto-GPT! ",
Fore.GREEN,
"Enter the name of your AI and its role below. Entering nothing will load"
" defaults.",
speak_text=True,
)
# Get AI Name from User
logger.typewriter_log(
"Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
)
ai_name = utils.clean_input("AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
logger.typewriter_log(
f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
)
# Get AI Role from User
logger.typewriter_log(
"Describe your AI's role: ",
Fore.GREEN,
"For example, 'an AI designed to autonomously develop and run businesses with"
" the sole goal of increasing your net worth.'",
)
ai_role = utils.clean_input(f"{ai_name} is: ")
if ai_role == "":
ai_role = "an AI designed to autonomously develop and run businesses with the"
" sole goal of increasing your net worth."
# Enter up to 5 goals for the AI
logger.typewriter_log(
"Enter up to 5 goals for your AI: ",
Fore.GREEN,
"For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
" multiple businesses autonomously'",
)
print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
ai_goals = []
for i in range(5):
ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
if len(ai_goals) == 0:
ai_goals = [
"Increase net worth",
"Grow Twitter Account",
"Develop and manage multiple businesses autonomously",
]
config = AIConfig(ai_name, ai_role, ai_goals)
return config
def parse_arguments():
"""Parses the arguments passed to the script"""
global cfg
cfg.set_debug_mode(False)
cfg.set_continuous_mode(False)
cfg.set_speak_mode(False)
parser = argparse.ArgumentParser(description="Process arguments.")
parser.add_argument(
"--continuous", "-c", action="store_true", help="Enable Continuous Mode"
)
parser.add_argument(
"--continuous-limit",
"-l",
type=int,
dest="continuous_limit",
help="Defines the number of times to run in continuous mode",
)
parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
parser.add_argument(
"--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
)
parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
parser.add_argument(
"--use-memory",
"-m",
dest="memory_type",
help="Defines which Memory backend to use",
)
parser.add_argument(
"--skip-reprompt",
"-y",
dest="skip_reprompt",
action="store_true",
help="Skips the re-prompting messages at the beginning of the script",
)
parser.add_argument(
"--ai-settings",
"-C",
dest="ai_settings_file",
help="Specifies which ai_settings.yaml file to use, will also automatically"
" skip the re-prompt.",
)
args = parser.parse_args()
if args.debug:
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
if args.continuous:
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
logger.typewriter_log(
"WARNING: ",
Fore.RED,
"Continuous mode is not recommended. It is potentially dangerous and may"
" cause your AI to run forever or carry out actions you would not usually"
" authorise. Use at your own risk.",
)
cfg.set_continuous_mode(True)
if args.continuous_limit:
logger.typewriter_log(
"Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
)
cfg.set_continuous_limit(args.continuous_limit)
# Check if continuous limit is used without continuous mode
if args.continuous_limit and not args.continuous:
parser.error("--continuous-limit can only be used with --continuous")
if args.speak:
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
cfg.set_speak_mode(True)
if args.gpt3only:
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_smart_llm_model(cfg.fast_llm_model)
if args.gpt4only:
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_fast_llm_model(cfg.smart_llm_model)
if args.memory_type:
supported_memory = get_supported_memory_backends()
chosen = args.memory_type
if not chosen in supported_memory:
logger.typewriter_log(
"ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
Fore.RED,
f"{supported_memory}",
)
logger.typewriter_log("Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
else:
cfg.memory_backend = chosen
if args.skip_reprompt:
logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
cfg.skip_reprompt = True
if args.ai_settings_file:
file = args.ai_settings_file
# Validate file
(validated, message) = utils.validate_yaml_file(file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
exit(1)
logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
cfg.ai_settings_file = file
cfg.skip_reprompt = True
def main():
global ai_name, memory
def main() -> None:
"""Main function for the script"""
cfg = Config()
# TODO: fill in llm values here
check_openai_api_key()
parse_arguments()
@@ -396,177 +45,5 @@ def main():
agent.start_interaction_loop()
class Agent:
"""Agent class for interacting with Auto-GPT.
Attributes:
ai_name: The name of the agent.
memory: The memory object to use.
full_message_history: The full message history.
next_action_count: The number of actions to execute.
prompt: The prompt to use.
user_input: The user input.
"""
def __init__(
self,
ai_name,
memory,
full_message_history,
next_action_count,
prompt,
user_input,
):
self.ai_name = ai_name
self.memory = memory
self.full_message_history = full_message_history
self.next_action_count = next_action_count
self.prompt = prompt
self.user_input = user_input
def start_interaction_loop(self):
# Interaction Loop
loop_count = 0
command_name = None
arguments = None
while True:
# Discontinue if continuous limit is reached
loop_count += 1
if (
cfg.continuous_mode
and cfg.continuous_limit > 0
and loop_count > cfg.continuous_limit
):
logger.typewriter_log(
"Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
)
break
# Send message to AI, get response
with Spinner("Thinking... "):
assistant_reply = chat.chat_with_ai(
self.prompt,
self.user_input,
self.full_message_history,
self.memory,
cfg.fast_token_limit,
) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
# Print Assistant thoughts
print_assistant_thoughts(assistant_reply)
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
)
if cfg.speak_mode:
speak.say_text(f"I want to execute {command_name}")
except Exception as e:
logger.error("Error: \n", str(e))
if not cfg.continuous_mode and self.next_action_count == 0:
### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
# Get key press: Prompt the user to press enter to continue or escape
# to exit
self.user_input = ""
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
)
print(
"Enter 'y' to authorise command, 'y -N' to run N continuous"
" commands, 'n' to exit program, or enter feedback for"
f" {self.ai_name}...",
flush=True,
)
while True:
console_input = utils.clean_input(
Fore.MAGENTA + "Input:" + Style.RESET_ALL
)
if console_input.lower().rstrip() == "y":
self.user_input = "GENERATE NEXT COMMAND JSON"
break
elif console_input.lower().startswith("y -"):
try:
self.next_action_count = abs(
int(console_input.split(" ")[1])
)
self.user_input = "GENERATE NEXT COMMAND JSON"
except ValueError:
print(
"Invalid input format. Please enter 'y -n' where n"
" is the number of continuous tasks."
)
continue
break
elif console_input.lower() == "n":
self.user_input = "EXIT"
break
else:
self.user_input = console_input
command_name = "human_feedback"
break
if self.user_input == "GENERATE NEXT COMMAND JSON":
logger.typewriter_log(
"-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
Fore.MAGENTA,
"",
)
elif self.user_input == "EXIT":
print("Exiting...", flush=True)
break
else:
# Print command
logger.typewriter_log(
"NEXT ACTION: ",
Fore.CYAN,
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
)
# Execute command
if command_name is not None and command_name.lower().startswith("error"):
result = (
f"Command {command_name} threw the following error: {arguments}"
)
elif command_name == "human_feedback":
result = f"Human feedback: {self.user_input}"
else:
result = (
f"Command {command_name} "
f"returned: {cmd.execute_command(command_name, arguments)}"
)
if self.next_action_count > 0:
self.next_action_count -= 1
memory_to_add = (
f"Assistant Reply: {assistant_reply} "
f"\nResult: {result} "
f"\nHuman Feedback: {self.user_input} "
)
self.memory.add(memory_to_add)
# Check if there's a result from the command append it to the message
# history
if result is not None:
self.full_message_history.append(
chat.create_chat_message("system", result)
)
logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
else:
self.full_message_history.append(
chat.create_chat_message("system", "Unable to execute command")
)
logger.typewriter_log(
"SYSTEM: ", Fore.YELLOW, "Unable to execute command"
)
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,4 @@
from autogpt.agent.agent import Agent
from autogpt.agent.agent_manager import AgentManager
__all__ = ["Agent", "AgentManager"]

View File

@@ -1,15 +1,13 @@
import json
import regex
import traceback
from colorama import Fore, Style
from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
import autogpt.commands as cmd
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.logger import logger
from autogpt.speak import say_text
from autogpt.json_fixes.bracket_termination import (
attempt_to_fix_json_by_finding_outermost_brackets,
)
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input
@@ -77,7 +75,7 @@ class Agent:
# Get command name and arguments
try:
command_name, arguments = cmd.get_command(
command_name, arguments = get_command(
attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
)
if cfg.speak_mode:
@@ -158,7 +156,7 @@ class Agent:
else:
result = (
f"Command {command_name} returned: "
f"{cmd.execute_command(command_name, arguments)}"
f"{execute_command(command_name, arguments)}"
)
if self.next_action_count > 0:
self.next_action_count -= 1
@@ -183,122 +181,3 @@ class Agent:
logger.typewriter_log(
"SYSTEM: ", Fore.YELLOW, "Unable to execute command"
)
def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
cfg = Config()
if cfg.speak_mode and cfg.debug_mode:
say_text(
"I have received an invalid JSON response from the OpenAI API. "
"Trying to fix it now."
)
logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
try:
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
json_match = json_pattern.search(json_string)
if json_match:
# Extract the valid JSON object from the string
json_string = json_match.group(0)
logger.typewriter_log(
title="Apparently json was fixed.", title_color=Fore.GREEN
)
if cfg.speak_mode and cfg.debug_mode:
say_text("Apparently json was fixed.")
else:
raise ValueError("No valid JSON object found")
except (json.JSONDecodeError, ValueError):
if cfg.speak_mode:
say_text("Didn't work. I will have to ignore this response then.")
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
json_string = {}
return json_string
def print_assistant_thoughts(ai_name, assistant_reply):
"""Prints the assistant's thoughts to the console"""
cfg = Config()
try:
try:
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
except json.JSONDecodeError:
logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply
)
if isinstance(assistant_reply_json, str):
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
# Check if assistant_reply_json is a string and attempt to parse
# it into a JSON object
if isinstance(assistant_reply_json, str):
try:
assistant_reply_json = json.loads(assistant_reply_json)
except json.JSONDecodeError:
logger.error("Error: Invalid JSON\n", assistant_reply)
assistant_reply_json = (
attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply_json
)
)
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
if not isinstance(assistant_reply_json, dict):
assistant_reply_json = {}
assistant_thoughts = assistant_reply_json.get("thoughts", {})
assistant_thoughts_text = assistant_thoughts.get("text")
if assistant_thoughts:
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
assistant_thoughts_plan = assistant_thoughts.get("plan")
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
assistant_thoughts_speak = assistant_thoughts.get("speak")
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
)
logger.typewriter_log(
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
)
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split("\n")
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log(
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
)
# Speak the assistant's thoughts
if cfg.speak_mode and assistant_thoughts_speak:
say_text(assistant_thoughts_speak)
return assistant_reply_json
except json.decoder.JSONDecodeError:
logger.error("Error: Invalid JSON\n", assistant_reply)
if cfg.speak_mode:
say_text(
"I have received an invalid JSON response from the OpenAI API."
" I cannot ignore this response."
)
# All other errors, return "Error: + error message"
except Exception:
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)

View File

@@ -0,0 +1,100 @@
"""Agent manager for managing GPT agents"""
from typing import List, Tuple, Union
from autogpt.llm_utils import create_chat_completion
from autogpt.config.config import Singleton
class AgentManager(metaclass=Singleton):
"""Agent manager for managing GPT agents"""
def __init__(self):
self.next_key = 0
self.agents = {} # key, (task, full_message_history, model)
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
"""Create a new agent and return its key
Args:
task: The task to perform
prompt: The prompt to use
model: The model to use
Returns:
The key of the new agent
"""
messages = [
{"role": "user", "content": prompt},
]
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
)
# Update full message history
messages.append({"role": "assistant", "content": agent_reply})
key = self.next_key
# This is done instead of len(agents) to make keys unique even if agents
# are deleted
self.next_key += 1
self.agents[key] = (task, messages, model)
return key, agent_reply
def message_agent(self, key: Union[str, int], message: str) -> str:
"""Send a message to an agent and return its response
Args:
key: The key of the agent to message
message: The message to send to the agent
Returns:
The agent's response
"""
task, messages, model = self.agents[int(key)]
# Add user message to message history before sending to agent
messages.append({"role": "user", "content": message})
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
)
# Update full message history
messages.append({"role": "assistant", "content": agent_reply})
return agent_reply
def list_agents(self) -> List[Tuple[Union[str, int], str]]:
"""Return a list of all agents
Returns:
A list of tuples of the form (key, task)
"""
# Return a list of agent keys and their tasks
return [(key, task) for key, (task, _, _) in self.agents.items()]
def delete_agent(self, key: Union[str, int]) -> bool:
"""Delete an agent from the agent manager
Args:
key: The key of the agent to delete
Returns:
True if successful, False otherwise
"""
try:
del self.agents[int(key)]
return True
except KeyError:
return False
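A minimal usage sketch of the new `AgentManager` (the task, prompt, and model name here are hypothetical, and an OpenAI API key is assumed to be configured for `create_chat_completion`):
```
from autogpt.agent.agent_manager import AgentManager

manager = AgentManager()  # Singleton metaclass: every call returns the same instance

# Spawn a sub-agent and capture its first reply
key, first_reply = manager.create_agent(
    task="summarize",
    prompt="Summarize the benefits of unit testing in one sentence.",
    model="gpt-3.5-turbo",  # assumed model name
)

print(manager.list_agents())        # e.g. [(0, "summarize")]
reply = manager.message_agent(key, "Give one more benefit.")
print(manager.delete_agent(key))    # True once the agent has been removed
```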

View File

@@ -1,75 +0,0 @@
from autogpt.llm_utils import create_chat_completion
next_key = 0
agents = {} # key, (task, full_message_history, model)
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit
def create_agent(task, prompt, model):
"""Create a new agent and return its key"""
global next_key
global agents
messages = [
{"role": "user", "content": prompt},
]
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
)
# Update full message history
messages.append({"role": "assistant", "content": agent_reply})
key = next_key
# This is done instead of len(agents) to make keys unique even if agents
# are deleted
next_key += 1
agents[key] = (task, messages, model)
return key, agent_reply
def message_agent(key, message):
"""Send a message to an agent and return its response"""
global agents
task, messages, model = agents[int(key)]
# Add user message to message history before sending to agent
messages.append({"role": "user", "content": message})
# Start GPT instance
agent_reply = create_chat_completion(
model=model,
messages=messages,
)
# Update full message history
messages.append({"role": "assistant", "content": agent_reply})
return agent_reply
def list_agents():
"""Return a list of all agents"""
global agents
# Return a list of agent keys and their tasks
return [(key, task) for key, (task, _, _) in agents.items()]
def delete_agent(key):
"""Delete an agent and return True if successful, False otherwise"""
global agents
try:
del agents[int(key)]
return True
except KeyError:
return False

View File

@@ -1,77 +0,0 @@
import json
from typing import List
from autogpt.call_ai_function import call_ai_function
from autogpt.config import Config
cfg = Config()
def evaluate_code(code: str) -> List[str]:
"""
A function that takes in a string and returns a response from create chat
completion api call.
Parameters:
code (str): Code to be evaluated.
Returns:
A result string from create chat completion. A list of suggestions to
improve the code.
"""
function_string = "def analyze_code(code: str) -> List[str]:"
args = [code]
description_string = (
"Analyzes the given code and returns a list of suggestions" " for improvements."
)
return call_ai_function(function_string, args, description_string)
def improve_code(suggestions: List[str], code: str) -> str:
"""
A function that takes in code and suggestions and returns a response from create
chat completion api call.
Parameters:
suggestions (List): A list of suggestions around what needs to be improved.
code (str): Code to be improved.
Returns:
A result string from create chat completion. Improved code in response.
"""
function_string = (
"def generate_improved_code(suggestions: List[str], code: str) -> str:"
)
args = [json.dumps(suggestions), code]
description_string = (
"Improves the provided code based on the suggestions"
" provided, making no other changes."
)
return call_ai_function(function_string, args, description_string)
def write_tests(code: str, focus: List[str]) -> str:
"""
A function that takes in code and focus topics and returns a response from create
chat completion api call.
Parameters:
focus (List): A list of suggestions around what needs to be improved.
code (str): Code for test cases to be generated against.
Returns:
A result string from create chat completion. Test cases for the submitted code
in response.
"""
function_string = (
"def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
)
args = [code, json.dumps(focus)]
description_string = (
"Generates test cases for the existing code, focusing on"
" specific areas if required."
)
return call_ai_function(function_string, args, description_string)

View File

@@ -1,29 +1,42 @@
""" Command and Control """
import json
import datetime
import autogpt.agent_manager as agents
from typing import List, NoReturn, Union
from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.google_search import google_official_search, google_search
from autogpt.commands.improve_code import improve_code
from autogpt.commands.write_tests import write_tests
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
from duckduckgo_search import ddg
from autogpt.ai_functions import evaluate_code, improve_code, write_tests
from autogpt.browse import scrape_links, scrape_text, summarize_text
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.file_operations import (
from autogpt.commands.image_gen import generate_image
from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.commands.execute_code import execute_python_file, execute_shell
from autogpt.commands.file_operations import (
append_to_file,
delete_file,
read_file,
search_files,
write_to_file,
)
from autogpt.json_fixes.parsing import fix_and_parse_json
from autogpt.memory import get_memory
from autogpt.speak import say_text
from autogpt.web import browse_website
from autogpt.processing.text import summarize_text
from autogpt.speech import say_text
from autogpt.commands.web_selenium import browse_website
cfg = Config()
CFG = Config()
AGENT_MANAGER = AgentManager()
def is_valid_int(value) -> bool:
def is_valid_int(value: str) -> bool:
"""Check if the value is a valid integer
Args:
value (str): The value to check
Returns:
bool: True if the value is a valid integer, False otherwise
"""
try:
int(value)
return True
@@ -31,8 +44,20 @@ def is_valid_int(value) -> bool:
return False
def get_command(response):
"""Parse the response and return the command name and arguments"""
def get_command(response: str):
"""Parse the response and return the command name and arguments
Args:
response (str): The response from the user
Returns:
tuple: The command name and arguments
Raises:
json.decoder.JSONDecodeError: If the response is not valid JSON
Exception: If any other error occurs
"""
try:
response_json = fix_and_parse_json(response)
@@ -62,16 +87,23 @@ def get_command(response):
return "Error:", str(e)
def execute_command(command_name, arguments):
"""Execute the command and return the result"""
memory = get_memory(cfg)
def execute_command(command_name: str, arguments):
"""Execute the command and return the result
Args:
command_name (str): The name of the command to execute
arguments (dict): The arguments for the command
Returns:
str: The result of the command"""
memory = get_memory(CFG)
try:
if command_name == "google":
# Check if the Google API key is set and use the official search method
# If the API key is not set or has only whitespaces, use the unofficial
# search method
key = cfg.google_api_key
key = CFG.google_api_key
if key and key.strip() and key != "your-google-api-key":
return google_official_search(arguments["input"])
else:
@@ -116,7 +148,7 @@ def execute_command(command_name, arguments):
elif command_name == "execute_python_file": # Add this command
return execute_python_file(arguments["file"])
elif command_name == "execute_shell":
if cfg.execute_local_commands:
if CFG.execute_local_commands:
return execute_shell(arguments["command_line"])
else:
return (
@@ -136,96 +168,55 @@ def execute_command(command_name, arguments):
" list for available commands and only respond in the specified JSON"
" format."
)
# All errors, return "Error: + error message"
except Exception as e:
return "Error: " + str(e)
return f"Error: {str(e)}"
def get_datetime():
"""Return the current date and time"""
return "Current date and time: " + datetime.datetime.now().strftime(
"%Y-%m-%d %H:%M:%S"
)
def get_text_summary(url: str, question: str) -> str:
"""Return the results of a google search
Args:
url (str): The url to scrape
question (str): The question to summarize the text for
def google_search(query, num_results=8):
"""Return the results of a google search"""
search_results = []
if not query:
return json.dumps(search_results)
for j in ddg(query, max_results=num_results):
search_results.append(j)
return json.dumps(search_results, ensure_ascii=False, indent=4)
def google_official_search(query, num_results=8):
"""Return the results of a google search using the official Google API"""
import json
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
try:
# Get the Google API key and Custom Search Engine ID from the config file
api_key = cfg.google_api_key
custom_search_engine_id = cfg.custom_search_engine_id
# Initialize the Custom Search API service
service = build("customsearch", "v1", developerKey=api_key)
# Send the search query and retrieve the results
result = (
service.cse()
.list(q=query, cx=custom_search_engine_id, num=num_results)
.execute()
)
# Extract the search result items from the response
search_results = result.get("items", [])
# Create a list of only the URLs from the search results
search_results_links = [item["link"] for item in search_results]
except HttpError as e:
# Handle errors in the API call
error_details = json.loads(e.content.decode())
# Check if the error is related to an invalid or missing API key
if error_details.get("error", {}).get(
"code"
) == 403 and "invalid API key" in error_details.get("error", {}).get(
"message", ""
):
return "Error: The provided Google API key is invalid or missing."
else:
return f"Error: {e}"
# Return the list of search result URLs
return search_results_links
def get_text_summary(url, question):
"""Return the results of a google search"""
Returns:
str: The summary of the text
"""
text = scrape_text(url)
summary = summarize_text(url, text, question)
return """ "Result" : """ + summary
return f""" "Result" : {summary}"""
def get_hyperlinks(url):
"""Return the results of a google search"""
def get_hyperlinks(url: str) -> Union[str, List[str]]:
"""Return the results of a google search
Args:
url (str): The url to scrape
Returns:
str or list: The hyperlinks on the page
"""
return scrape_links(url)
def shutdown():
def shutdown() -> NoReturn:
"""Shut down the program"""
print("Shutting down...")
quit()
def start_agent(name, task, prompt, model=cfg.fast_llm_model):
"""Start an agent with a given name, task, and prompt"""
def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
"""Start an agent with a given name, task, and prompt
Args:
name (str): The name of the agent
task (str): The task of the agent
prompt (str): The prompt for the agent
model (str): The model to use for the agent
Returns:
str: The response of the agent
"""
# Remove underscores from name
voice_name = name.replace("_", " ")
@@ -233,42 +224,53 @@ def start_agent(name, task, prompt, model=cfg.fast_llm_model):
agent_intro = f"{voice_name} here, Reporting for duty!"
# Create agent
if cfg.speak_mode:
if CFG.speak_mode:
say_text(agent_intro, 1)
key, ack = agents.create_agent(task, first_message, model)
key, ack = AGENT_MANAGER.create_agent(task, first_message, model)
if cfg.speak_mode:
if CFG.speak_mode:
say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
# Assign task (prompt), get response
agent_response = agents.message_agent(key, prompt)
agent_response = AGENT_MANAGER.message_agent(key, prompt)
return f"Agent {name} created with key {key}. First response: {agent_response}"
def message_agent(key, message):
def message_agent(key: str, message: str) -> str:
"""Message an agent with a given key and message"""
# Check if the key is a valid integer
if is_valid_int(key):
agent_response = agents.message_agent(int(key), message)
agent_response = AGENT_MANAGER.message_agent(int(key), message)
# Check if the key is a valid string
elif isinstance(key, str):
agent_response = agents.message_agent(key, message)
agent_response = AGENT_MANAGER.message_agent(key, message)
else:
return "Invalid key, must be an integer or a string."
# Speak response
if cfg.speak_mode:
if CFG.speak_mode:
say_text(agent_response, 1)
return agent_response
def list_agents():
"""List all agents"""
return list_agents()
"""List all agents
Returns:
list: A list of all agents
"""
return AGENT_MANAGER.list_agents()
def delete_agent(key):
"""Delete an agent with a given key"""
result = agents.delete_agent(key)
def delete_agent(key: str) -> str:
"""Delete an agent with a given key
Args:
key (str): The key of the agent to delete
Returns:
str: A message indicating whether the agent was deleted or not
"""
result = AGENT_MANAGER.delete_agent(key)
return f"Agent {key} deleted." if result else f"Agent {key} does not exist."

autogpt/args.py (new file, 128 lines)
View File

@@ -0,0 +1,128 @@
"""This module contains the argument parsing logic for the script."""
import argparse
from colorama import Fore
from autogpt import utils
from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory import get_supported_memory_backends
CFG = Config()
def parse_arguments() -> None:
"""Parses the arguments passed to the script
Returns:
None
"""
CFG.set_debug_mode(False)
CFG.set_continuous_mode(False)
CFG.set_speak_mode(False)
parser = argparse.ArgumentParser(description="Process arguments.")
parser.add_argument(
"--continuous", "-c", action="store_true", help="Enable Continuous Mode"
)
parser.add_argument(
"--continuous-limit",
"-l",
type=int,
dest="continuous_limit",
help="Defines the number of times to run in continuous mode",
)
parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
parser.add_argument(
"--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
)
parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
parser.add_argument(
"--use-memory",
"-m",
dest="memory_type",
help="Defines which Memory backend to use",
)
parser.add_argument(
"--skip-reprompt",
"-y",
dest="skip_reprompt",
action="store_true",
help="Skips the re-prompting messages at the beginning of the script",
)
parser.add_argument(
"--ai-settings",
"-C",
dest="ai_settings_file",
help="Specifies which ai_settings.yaml file to use, will also automatically"
" skip the re-prompt.",
)
args = parser.parse_args()
if args.debug:
logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
CFG.set_debug_mode(True)
if args.continuous:
logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
logger.typewriter_log(
"WARNING: ",
Fore.RED,
"Continuous mode is not recommended. It is potentially dangerous and may"
" cause your AI to run forever or carry out actions you would not usually"
" authorise. Use at your own risk.",
)
CFG.set_continuous_mode(True)
if args.continuous_limit:
logger.typewriter_log(
"Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
)
CFG.set_continuous_limit(args.continuous_limit)
# Check if continuous limit is used without continuous mode
if args.continuous_limit and not args.continuous:
parser.error("--continuous-limit can only be used with --continuous")
if args.speak:
logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
CFG.set_speak_mode(True)
if args.gpt3only:
logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
CFG.set_smart_llm_model(CFG.fast_llm_model)
if args.gpt4only:
logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
CFG.set_fast_llm_model(CFG.smart_llm_model)
if args.memory_type:
supported_memory = get_supported_memory_backends()
chosen = args.memory_type
if chosen not in supported_memory:
logger.typewriter_log(
"ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
Fore.RED,
f"{supported_memory}",
)
logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
else:
CFG.memory_backend = chosen
if args.skip_reprompt:
logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
CFG.skip_reprompt = True
if args.ai_settings_file:
file = args.ai_settings_file
# Validate file
(validated, message) = utils.validate_yaml_file(file)
if not validated:
logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
logger.double_check()
exit(1)
logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
CFG.ai_settings_file = file
CFG.skip_reprompt = True

View File

@@ -1,26 +0,0 @@
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
cfg = Config()
# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None) -> str:
"""Call an AI function"""
if model is None:
model = cfg.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
args = ", ".join(args)
messages = [
{
"role": "system",
"content": f"You are now the following python function: ```# {description}"
f"\n{function}```\n\nOnly respond with your `return` value.",
},
{"role": "user", "content": args},
]
return create_chat_completion(model=model, messages=messages, temperature=0)
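To make the "AI function" trick concrete, this is roughly the chat payload that `call_ai_function` assembles when `evaluate_code` calls it; the snippet of code being analyzed is a made-up example:
```
# Sketch of the messages built by call_ai_function() for evaluate_code();
# the analyzed snippet is hypothetical, the prompt template mirrors the code above.
fence = "`" * 3
description = (
    "Analyzes the given code and returns a list of suggestions for improvements."
)
function = "def analyze_code(code: str) -> List[str]:"
args = ", ".join(["def add(a, b):\n    return a + b"])  # a single arg stays as-is

messages = [
    {
        "role": "system",
        "content": f"You are now the following python function: {fence}# {description}"
        f"\n{function}{fence}\n\nOnly respond with your `return` value.",
    },
    {"role": "user", "content": args},
]
# create_chat_completion(model=cfg.smart_llm_model, messages=messages, temperature=0)
```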

View File

@@ -5,7 +5,7 @@ from openai.error import RateLimitError
from autogpt import token_counter
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.logger import logger
from autogpt.logs import logger
cfg = Config()

View File

View File

@@ -0,0 +1,25 @@
"""Code evaluation module."""
from typing import List
from autogpt.llm_utils import call_ai_function
def evaluate_code(code: str) -> List[str]:
"""
A function that takes in a string and returns a response from create chat
completion api call.
Parameters:
code (str): Code to be evaluated.
Returns:
A result string from create chat completion. A list of suggestions to
improve the code.
"""
function_string = "def analyze_code(code: str) -> List[str]:"
args = [code]
description_string = (
"Analyzes the given code and returns a list of suggestions" " for improvements."
)
return call_ai_function(function_string, args, description_string)

View File

@@ -0,0 +1,125 @@
"""Execute code in a Docker container"""
import os
from pathlib import Path
import subprocess
import docker
from docker.errors import ImageNotFound
WORKING_DIRECTORY = Path(__file__).parent.parent / "auto_gpt_workspace"
def execute_python_file(file: str):
"""Execute a Python file in a Docker container and return the output
Args:
file (str): The name of the file to execute
Returns:
str: The output of the file
"""
print(f"Executing file '{file}' in workspace '{WORKING_DIRECTORY}'")
if not file.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed."
file_path = os.path.join(WORKING_DIRECTORY, file)
if not os.path.isfile(file_path):
return f"Error: File '{file}' does not exist."
if we_are_running_in_a_docker_container():
result = subprocess.run(
f"python {file_path}", capture_output=True, encoding="utf8", shell=True
)
if result.returncode == 0:
return result.stdout
else:
return f"Error: {result.stderr}"
try:
client = docker.from_env()
image_name = "python:3.10"
try:
client.images.get(image_name)
print(f"Image '{image_name}' found locally")
except ImageNotFound:
print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
# Use the low-level API to stream the pull response
low_level_client = docker.APIClient()
for line in low_level_client.pull(image_name, stream=True, decode=True):
# Print the status and progress, if available
status = line.get("status")
progress = line.get("progress")
if status and progress:
print(f"{status}: {progress}")
elif status:
print(status)
# You can replace 'python:3.8' with the desired Python image/version
# You can find available Python images on Docker Hub:
# https://hub.docker.com/_/python
container = client.containers.run(
image_name,
f"python {file}",
volumes={
os.path.abspath(WORKING_DIRECTORY): {
"bind": "/workspace",
"mode": "ro",
}
},
working_dir="/workspace",
stderr=True,
stdout=True,
detach=True,
)
container.wait()
logs = container.logs().decode("utf-8")
container.remove()
# print(f"Execution complete. Output: {output}")
# print(f"Logs: {logs}")
return logs
except Exception as e:
return f"Error: {str(e)}"
def execute_shell(command_line: str) -> str:
"""Execute a shell command and return the output
Args:
command_line (str): The command line to execute
Returns:
str: The output of the command
"""
current_dir = os.getcwd()
if WORKING_DIRECTORY not in current_dir: # Change dir into workspace if necessary
work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY)
os.chdir(work_dir)
print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
result = subprocess.run(command_line, capture_output=True, shell=True)
output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
# Change back to whatever the prior working dir was
os.chdir(current_dir)
return output
def we_are_running_in_a_docker_container() -> bool:
"""Check if we are running in a Docker container
Returns:
bool: True if we are running in a Docker container, False otherwise
"""
return os.path.exists("/.dockerenv")

View File

@@ -1,16 +1,29 @@
"""File operations for AutoGPT"""
import os
import os.path
from pathlib import Path
from typing import Generator, List
# Set a dedicated folder for file I/O
working_directory = "auto_gpt_workspace"
WORKING_DIRECTORY = Path(__file__).parent.parent / "auto_gpt_workspace"
# Create the directory if it doesn't exist
if not os.path.exists(working_directory):
os.makedirs(working_directory)
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
WORKING_DIRECTORY = str(WORKING_DIRECTORY)
def safe_join(base, *paths):
"""Join one or more path components intelligently."""
def safe_join(base: str, *paths) -> str:
"""Join one or more path components intelligently.
Args:
base (str): The base path
*paths (str): The paths to join to the base path
Returns:
str: The joined path
"""
new_path = os.path.join(base, *paths)
norm_new_path = os.path.normpath(new_path)
@@ -20,7 +33,9 @@ def safe_join(base, *paths):
return norm_new_path
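The containment check elided by this hunk is what makes `safe_join` safe; the idea, sketched with an assumed error message (the exact wording is not shown here):
```
import os.path

def safe_join_sketch(base: str, *paths: str) -> str:
    """Join paths but refuse to escape the base directory (illustrative only)."""
    norm_new_path = os.path.normpath(os.path.join(base, *paths))
    if os.path.commonprefix([base, norm_new_path]) != base:
        raise ValueError("Attempted to access a path outside of the workspace")
    return norm_new_path

print(safe_join_sketch("/tmp/auto_gpt_workspace", "notes.txt"))
# -> /tmp/auto_gpt_workspace/notes.txt
# safe_join_sketch("/tmp/auto_gpt_workspace", "../../etc/passwd") raises ValueError
```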
def split_file(content, max_length=4000, overlap=0):
def split_file(
content: str, max_length: int = 4000, overlap: int = 0
) -> Generator[str, None, None]:
"""
Split text into chunks of a specified maximum length with a specified overlap
between chunks.
@@ -45,10 +60,17 @@ def split_file(content, max_length=4000, overlap=0):
start += max_length - overlap
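The `max_length`/`overlap` arithmetic in `split_file` is easiest to see with a small self-contained sketch (character counts only, not the exact upstream body):
```
def split_sketch(content: str, max_length: int = 10, overlap: int = 3):
    """Yield overlapping chunks; each new chunk starts max_length - overlap later."""
    start = 0
    while start < len(content):
        yield content[start : start + max_length]
        start += max_length - overlap

print(list(split_sketch("abcdefghijklmnopqrstuvwxyz")))
# ['abcdefghij', 'hijklmnopq', 'opqrstuvwx', 'vwxyz']
```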
def read_file(filename) -> str:
"""Read a file and return the contents"""
def read_file(filename: str) -> str:
"""Read a file and return the contents
Args:
filename (str): The name of the file to read
Returns:
str: The contents of the file
"""
try:
filepath = safe_join(working_directory, filename)
filepath = safe_join(WORKING_DIRECTORY, filename)
with open(filepath, "r", encoding="utf-8") as f:
content = f.read()
return content
@@ -56,7 +78,9 @@ def read_file(filename) -> str:
return f"Error: {str(e)}"
def ingest_file(filename, memory, max_length=4000, overlap=200):
def ingest_file(
filename: str, memory, max_length: int = 4000, overlap: int = 200
) -> None:
"""
Ingest a file by reading its content, splitting it into chunks with a specified
maximum length and overlap, and adding the chunks to the memory storage.
@@ -88,10 +112,18 @@ def ingest_file(filename, memory, max_length=4000, overlap=200):
print(f"Error while ingesting file '{filename}': {str(e)}")
def write_to_file(filename, text):
"""Write text to a file"""
def write_to_file(filename: str, text: str) -> str:
"""Write text to a file
Args:
filename (str): The name of the file to write to
text (str): The text to write to the file
Returns:
str: A message indicating success or failure
"""
try:
filepath = safe_join(working_directory, filename)
filepath = safe_join(WORKING_DIRECTORY, filename)
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)
@@ -99,43 +131,66 @@ def write_to_file(filename, text):
f.write(text)
return "File written to successfully."
except Exception as e:
return "Error: " + str(e)
return f"Error: {str(e)}"
def append_to_file(filename, text):
"""Append text to a file"""
def append_to_file(filename: str, text: str) -> str:
"""Append text to a file
Args:
filename (str): The name of the file to append to
text (str): The text to append to the file
Returns:
str: A message indicating success or failure
"""
try:
filepath = safe_join(working_directory, filename)
filepath = safe_join(WORKING_DIRECTORY, filename)
with open(filepath, "a") as f:
f.write(text)
return "Text appended successfully."
except Exception as e:
return "Error: " + str(e)
return f"Error: {str(e)}"
def delete_file(filename):
"""Delete a file"""
def delete_file(filename: str) -> str:
"""Delete a file
Args:
filename (str): The name of the file to delete
Returns:
str: A message indicating success or failure
"""
try:
filepath = safe_join(working_directory, filename)
filepath = safe_join(WORKING_DIRECTORY, filename)
os.remove(filepath)
return "File deleted successfully."
except Exception as e:
return "Error: " + str(e)
return f"Error: {str(e)}"
def search_files(directory):
def search_files(directory: str) -> List[str]:
"""Search for files in a directory
Args:
directory (str): The directory to search in
Returns:
List[str]: A list of files found in the directory
"""
found_files = []
if directory == "" or directory == "/":
search_directory = working_directory
if directory in {"", "/"}:
search_directory = WORKING_DIRECTORY
else:
search_directory = safe_join(working_directory, directory)
search_directory = safe_join(WORKING_DIRECTORY, directory)
for root, _, files in os.walk(search_directory):
for file in files:
if file.startswith("."):
continue
relative_path = os.path.relpath(os.path.join(root, file), working_directory)
relative_path = os.path.relpath(os.path.join(root, file), WORKING_DIRECTORY)
found_files.append(relative_path)
return found_files

View File

@@ -0,0 +1,86 @@
"""Google search command for Autogpt."""
import json
from typing import List, Union
from duckduckgo_search import ddg
from autogpt.config import Config
CFG = Config()
def google_search(query: str, num_results: int = 8) -> str:
"""Return the results of a google search
Args:
query (str): The search query.
num_results (int): The number of results to return.
Returns:
str: The results of the search.
"""
search_results = []
if not query:
return json.dumps(search_results)
results = ddg(query, max_results=num_results)
if not results:
return json.dumps(search_results)
for j in results:
search_results.append(j)
return json.dumps(search_results, ensure_ascii=False, indent=4)
def google_official_search(query: str, num_results: int = 8) -> Union[str, List[str]]:
"""Return the results of a google search using the official Google API
Args:
query (str): The search query.
num_results (int): The number of results to return.
Returns:
str: The results of the search.
"""
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
try:
# Get the Google API key and Custom Search Engine ID from the config file
api_key = CFG.google_api_key
custom_search_engine_id = CFG.custom_search_engine_id
# Initialize the Custom Search API service
service = build("customsearch", "v1", developerKey=api_key)
# Send the search query and retrieve the results
result = (
service.cse()
.list(q=query, cx=custom_search_engine_id, num=num_results)
.execute()
)
# Extract the search result items from the response
search_results = result.get("items", [])
# Create a list of only the URLs from the search results
search_results_links = [item["link"] for item in search_results]
except HttpError as e:
# Handle errors in the API call
error_details = json.loads(e.content.decode())
# Check if the error is related to an invalid or missing API key
if error_details.get("error", {}).get(
"code"
) == 403 and "invalid API key" in error_details.get("error", {}).get(
"message", ""
):
return "Error: The provided Google API key is invalid or missing."
else:
return f"Error: {e}"
# Return the list of search result URLs
return search_results_links

View File

@@ -0,0 +1,99 @@
""" Image Generation Module for AutoGPT."""
import io
import os.path
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
from pathlib import Path
from autogpt.config import Config
CFG = Config()
WORKING_DIRECTORY = Path(__file__).parent.parent / "auto_gpt_workspace"
def generate_image(prompt: str) -> str:
"""Generate an image from a prompt.
Args:
prompt (str): The prompt to use
Returns:
str: The filename of the image
"""
filename = f"{str(uuid.uuid4())}.jpg"
# DALL-E
if CFG.image_provider == "dalle":
return generate_image_with_dalle(prompt, filename)
elif CFG.image_provider == "sd":
return generate_image_with_hf(prompt, filename)
else:
return "No Image Provider Set"
def generate_image_with_hf(prompt: str, filename: str) -> str:
"""Generate an image with HuggingFace's API.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
Returns:
str: The filename of the image
"""
API_URL = (
"https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
)
if CFG.huggingface_api_token is None:
raise ValueError(
"You need to set your Hugging Face API token in the config file."
)
headers = {"Authorization": f"Bearer {CFG.huggingface_api_token}"}
response = requests.post(
API_URL,
headers=headers,
json={
"inputs": prompt,
},
)
image = Image.open(io.BytesIO(response.content))
print(f"Image Generated for prompt:{prompt}")
image.save(os.path.join(WORKING_DIRECTORY, filename))
return f"Saved to disk:{filename}"
def generate_image_with_dalle(prompt: str, filename: str) -> str:
"""Generate an image with DALL-E.
Args:
prompt (str): The prompt to use
filename (str): The filename to save the image to
Returns:
str: The filename of the image
"""
openai.api_key = CFG.openai_api_key
response = openai.Image.create(
prompt=prompt,
n=1,
size="256x256",
response_format="b64_json",
)
print(f"Image Generated for prompt:{prompt}")
image_data = b64decode(response["data"][0]["b64_json"])
with open(f"{WORKING_DIRECTORY}/{filename}", mode="wb") as png:
png.write(image_data)
return f"Saved to disk:{filename}"

View File

@@ -0,0 +1,28 @@
import json
from typing import List
from autogpt.llm_utils import call_ai_function
def improve_code(suggestions: List[str], code: str) -> str:
"""
A function that takes in code and suggestions and returns a response from create
chat completion api call.
Parameters:
suggestions (List): A list of suggestions around what needs to be improved.
code (str): Code to be improved.
Returns:
A result string from create chat completion. Improved code in response.
"""
function_string = (
"def generate_improved_code(suggestions: List[str], code: str) -> str:"
)
args = [json.dumps(suggestions), code]
description_string = (
"Improves the provided code based on the suggestions"
" provided, making no other changes."
)
return call_ai_function(function_string, args, description_string)

autogpt/commands/times.py (new file, 10 lines)
View File

@@ -0,0 +1,10 @@
from datetime import datetime
def get_datetime() -> str:
"""Return the current date and time
Returns:
str: The current date and time
"""
return "Current date and time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")

View File

@@ -1,21 +1,30 @@
"""Browse a webpage and summarize it using the LLM model"""
from typing import List, Tuple, Union
from urllib.parse import urljoin, urlparse
import requests
from requests import Response
from bs4 import BeautifulSoup
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.memory import get_memory
cfg = Config()
memory = get_memory(cfg)
CFG = Config()
memory = get_memory(CFG)
session = requests.Session()
session.headers.update({"User-Agent": cfg.user_agent})
session.headers.update({"User-Agent": CFG.user_agent})
# Function to check if the URL is valid
def is_valid_url(url):
def is_valid_url(url: str) -> bool:
"""Check if the URL is valid
Args:
url (str): The URL to check
Returns:
bool: True if the URL is valid, False otherwise
"""
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
@@ -23,13 +32,27 @@ def is_valid_url(url):
return False
# Function to sanitize the URL
def sanitize_url(url):
def sanitize_url(url: str) -> str:
"""Sanitize the URL
Args:
url (str): The URL to sanitize
Returns:
str: The sanitized URL
"""
return urljoin(url, urlparse(url).path)
# Define and check for local file address prefixes
def check_local_file_access(url):
def check_local_file_access(url: str) -> bool:
"""Check if the URL is a local file
Args:
url (str): The URL to check
Returns:
bool: True if the URL is a local file, False otherwise
"""
local_prefixes = [
"file:///",
"file://localhost",
@@ -39,7 +62,22 @@ def check_local_file_access(url):
return any(url.startswith(prefix) for prefix in local_prefixes)
def get_response(url, timeout=10):
def get_response(
url: str, timeout: int = 10
) -> Union[Tuple[None, str], Tuple[Response, None]]:
"""Get the response from a URL
Args:
url (str): The URL to get the response from
timeout (int): The timeout for the HTTP request
Returns:
tuple[None, str] | tuple[Response, None]: The response and error message
Raises:
ValueError: If the URL is invalid
requests.exceptions.RequestException: If the HTTP request fails
"""
try:
# Restrict access to local files
if check_local_file_access(url):
@@ -55,21 +93,28 @@ def get_response(url, timeout=10):
# Check if the response contains an HTTP error
if response.status_code >= 400:
return None, "Error: HTTP " + str(response.status_code) + " error"
return None, f"Error: HTTP {str(response.status_code)} error"
return response, None
except ValueError as ve:
# Handle invalid URL format
return None, "Error: " + str(ve)
return None, f"Error: {str(ve)}"
except requests.exceptions.RequestException as re:
# Handle exceptions related to the HTTP request
# (e.g., connection errors, timeouts, etc.)
return None, "Error: " + str(re)
return None, f"Error: {str(re)}"
def scrape_text(url):
"""Scrape text from a webpage"""
def scrape_text(url: str) -> str:
"""Scrape text from a webpage
Args:
url (str): The URL to scrape text from
Returns:
str: The scraped text
"""
response, error_message = get_response(url)
if error_message:
return error_message
@@ -89,24 +134,45 @@ def scrape_text(url):
return text
def extract_hyperlinks(soup):
"""Extract hyperlinks from a BeautifulSoup object"""
def extract_hyperlinks(soup: BeautifulSoup) -> List[Tuple[str, str]]:
"""Extract hyperlinks from a BeautifulSoup object
Args:
soup (BeautifulSoup): The BeautifulSoup object
Returns:
List[Tuple[str, str]]: The extracted hyperlinks
"""
hyperlinks = []
for link in soup.find_all("a", href=True):
hyperlinks.append((link.text, link["href"]))
return hyperlinks
def format_hyperlinks(hyperlinks):
"""Format hyperlinks into a list of strings"""
def format_hyperlinks(hyperlinks: List[Tuple[str, str]]) -> List[str]:
"""Format hyperlinks into a list of strings
Args:
hyperlinks (List[Tuple[str, str]]): The hyperlinks to format
Returns:
List[str]: The formatted hyperlinks
"""
formatted_links = []
for link_text, link_url in hyperlinks:
formatted_links.append(f"{link_text} ({link_url})")
return formatted_links
def scrape_links(url):
"""Scrape links from a webpage"""
def scrape_links(url: str) -> Union[str, List[str]]:
"""Scrape links from a webpage
Args:
url (str): The URL to scrape links from
Returns:
Union[str, List[str]]: The scraped links
"""
response, error_message = get_response(url)
if error_message:
return error_message
@@ -122,25 +188,6 @@ def scrape_links(url):
return format_hyperlinks(hyperlinks)
def split_text(text, max_length=cfg.browse_chunk_max_length):
"""Split text into chunks of a maximum length"""
paragraphs = text.split("\n")
current_length = 0
current_chunk = []
for paragraph in paragraphs:
if current_length + len(paragraph) + 1 <= max_length:
current_chunk.append(paragraph)
current_length += len(paragraph) + 1
else:
yield "\n".join(current_chunk)
current_chunk = [paragraph]
current_length = len(paragraph) + 1
if current_chunk:
yield "\n".join(current_chunk)
def create_message(chunk, question):
"""Create a message for the user to summarize a chunk of text"""
return {
@@ -149,50 +196,3 @@ def create_message(chunk, question):
f' question: "{question}" -- if the question cannot be answered using the'
" text, please summarize the text.",
}
def summarize_text(url, text, question):
"""Summarize text using the LLM model"""
if not text:
return "Error: No text to summarize"
text_length = len(text)
print(f"Text length: {text_length} characters")
summaries = []
chunks = list(split_text(text))
for i, chunk in enumerate(chunks):
print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
memory.add(memory_to_add)
print(f"Summarizing chunk {i + 1} / {len(chunks)}")
messages = [create_message(chunk, question)]
summary = create_chat_completion(
model=cfg.fast_llm_model,
messages=messages,
max_tokens=cfg.browse_summary_max_token,
)
summaries.append(summary)
print(f"Added chunk {i + 1} summary to memory")
memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
memory.add(memory_to_add)
print(f"Summarized {len(chunks)} chunks.")
combined_summary = "\n".join(summaries)
messages = [create_message(combined_summary, question)]
final_summary = create_chat_completion(
model=cfg.fast_llm_model,
messages=messages,
max_tokens=cfg.browse_summary_max_token,
)
return final_summary
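
For reference, a standalone sketch of what the URL helpers above compute, using only urllib.parse (expected values shown in comments; this is not an import from the package):
```
from urllib.parse import urljoin, urlparse

url = "https://example.com/docs/page?utm=1#top"
parts = urlparse(url)

# is_valid_url: a URL is accepted when it has both a scheme and a host.
print(all([parts.scheme, parts.netloc]))  # True
# sanitize_url: drop the query string and fragment, keep scheme + host + path.
print(urljoin(url, parts.path))           # https://example.com/docs/page
# check_local_file_access: reject file:// style URLs before issuing a request.
print(url.startswith("file:///"))         # False
```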

View File

@@ -0,0 +1,145 @@
"""Selenium web scraping module."""
from selenium import webdriver
import autogpt.processing.text as summary
from bs4 import BeautifulSoup
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import logging
from pathlib import Path
from autogpt.config import Config
FILE_DIR = Path(__file__).parent.parent
CFG = Config()
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
"""Browse a website and return the answer and links to the user
Args:
url (str): The url of the website to browse
question (str): The question asked by the user
Returns:
tuple[str, WebDriver]: The answer and links to the user and the webdriver
"""
driver, text = scrape_text_with_selenium(url)
add_header(driver)
summary_text = summary.summarize_text(url, text, question, driver)
links = scrape_links_with_selenium(driver)
# Limit links to 5
if len(links) > 5:
links = links[:5]
close_browser(driver)
return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
"""Scrape text from a website using selenium
Args:
url (str): The url of the website to scrape
Returns:
tuple[WebDriver, str]: The webdriver and the text scraped from the website
"""
logging.getLogger("selenium").setLevel(logging.CRITICAL)
options = Options()
options.add_argument(
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
)
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options
)
driver.get(url)
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.TAG_NAME, "body"))
)
# Get the HTML content directly from the browser's DOM
page_source = driver.execute_script("return document.body.outerHTML;")
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "\n".join(chunk for chunk in chunks if chunk)
return driver, text
def scrape_links_with_selenium(driver: WebDriver) -> list[str]:
"""Scrape links from a website using selenium
Args:
driver (WebDriver): The webdriver to use to scrape the links
Returns:
list[str]: The links scraped from the website
"""
page_source = driver.page_source
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
hyperlinks = extract_hyperlinks(soup)
return format_hyperlinks(hyperlinks)
def close_browser(driver: WebDriver) -> None:
"""Close the browser
Args:
driver (WebDriver): The webdriver to close
Returns:
None
"""
driver.quit()
def extract_hyperlinks(soup: BeautifulSoup) -> list[tuple[str, str]]:
"""Extract hyperlinks from a BeautifulSoup object
Args:
soup (BeautifulSoup): The BeautifulSoup object to extract the hyperlinks from
Returns:
list[tuple[str, str]]: The hyperlinks extracted from the BeautifulSoup object
"""
return [(link.text, link["href"]) for link in soup.find_all("a", href=True)]
def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
"""Format hyperlinks to be displayed to the user
Args:
hyperlinks (list[tuple[str, str]]): The hyperlinks to format
Returns:
list[str]: The formatted hyperlinks
"""
return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
def add_header(driver: WebDriver) -> None:
"""Add a header to the website
Args:
driver (WebDriver): The webdriver to use to add the header
Returns:
None
"""
driver.execute_script(open(f"{FILE_DIR}/js/overlay.js", "r").read())
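
A self-contained sketch of the hyperlink extraction and formatting steps above, run against a small in-memory HTML snippet instead of a live WebDriver:
```
from bs4 import BeautifulSoup

html = '<a href="https://example.com">Example</a> <a href="/docs">Docs</a>'
soup = BeautifulSoup(html, "html.parser")

# Mirrors extract_hyperlinks / format_hyperlinks on the parsed HTML.
hyperlinks = [(link.text, link["href"]) for link in soup.find_all("a", href=True)]
print([f"{text} ({href})" for text, href in hyperlinks])
# ['Example (https://example.com)', 'Docs (/docs)']
```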

View File

@@ -0,0 +1,29 @@
"""A module that contains a function to generate test cases for the submitted code."""
import json
from typing import List
from autogpt.llm_utils import call_ai_function
def write_tests(code: str, focus: List[str]) -> str:
"""
A function that takes in code and focus topics and returns a response from create
chat completion api call.
Parameters:
focus (List): A list of suggestions around what needs to be improved.
code (str): Code for test cases to be generated against.
Returns:
A result string from create chat completion. Test cases for the submitted code
in response.
"""
function_string = (
"def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
)
args = [code, json.dumps(focus)]
description_string = (
"Generates test cases for the existing code, focusing on"
" specific areas if required."
)
return call_ai_function(function_string, args, description_string)
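
A hypothetical call to write_tests (not part of the diff): the import path, snippet and focus list are assumptions, and call_ai_function requires a configured OpenAI API key.
```
# Illustrative only; the module path is an assumption.
from autogpt.commands.write_tests import write_tests

code = "def add(a, b):\n    return a + b"
focus = ["edge cases", "invalid input types"]

# Returns model-generated test cases for the snippet as a string.
print(write_tests(code, focus))
```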

View File

@@ -0,0 +1,14 @@
"""
This module contains the configuration classes for AutoGPT.
"""
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import check_openai_api_key, Config
from autogpt.config.singleton import AbstractSingleton, Singleton
__all__ = [
"check_openai_api_key",
"AbstractSingleton",
"AIConfig",
"Config",
"Singleton",
]

View File

@@ -1,9 +1,11 @@
# sourcery skip: do-not-use-staticmethod
"""
A module that contains the AIConfig class object that contains the configuration
"""
import os
from typing import Type
from typing import List, Optional, Type
import yaml
from autogpt.prompt import get_prompt
class AIConfig:
"""
@@ -16,7 +18,7 @@ class AIConfig:
"""
def __init__(
self, ai_name: str = "", ai_role: str = "", ai_goals: list = []
self, ai_name: str = "", ai_role: str = "", ai_goals: Optional[List] = None
) -> None:
"""
Initialize a class instance
@@ -28,7 +30,8 @@ class AIConfig:
Returns:
None
"""
if ai_goals is None:
ai_goals = []
self.ai_name = ai_name
self.ai_role = ai_role
self.ai_goals = ai_goals
@@ -36,15 +39,14 @@ class AIConfig:
# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")
@classmethod
def load(cls: "Type[AIConfig]", config_file: str = SAVE_FILE) -> "Type[AIConfig]":
@staticmethod
def load(config_file: str = SAVE_FILE) -> "AIConfig":
"""
Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
yaml file if yaml file exists,
else returns class with no parameters.
Parameters:
cls (class object): An AIConfig Class object.
config_file (int): The path to the config yaml file.
DEFAULT: "../ai_settings.yaml"
@@ -62,7 +64,7 @@ class AIConfig:
ai_role = config_params.get("ai_role", "")
ai_goals = config_params.get("ai_goals", [])
# type: Type[AIConfig]
return cls(ai_name, ai_role, ai_goals)
return AIConfig(ai_name, ai_role, ai_goals)
def save(self, config_file: str = SAVE_FILE) -> None:
"""
@@ -103,6 +105,8 @@ class AIConfig:
""
)
from autogpt.prompt import get_prompt
# Construct full prompt
full_prompt = (
f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"

View File

@@ -1,30 +1,15 @@
import abc
"""Configuration class to store the state of bools for different scripts access."""
import os
from colorama import Fore
from autogpt.config.singleton import Singleton
import openai
import yaml
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
class Singleton(abc.ABCMeta, type):
"""
Singleton metaclass for ensuring only one instance of a class.
"""
_instances = {}
def __call__(cls, *args, **kwargs):
"""Call method for the singleton metaclass."""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class AbstractSingleton(abc.ABC, metaclass=Singleton):
pass
load_dotenv(verbose=True)
class Config(metaclass=Singleton):
@@ -32,7 +17,7 @@ class Config(metaclass=Singleton):
Configuration class to store the state of bools for different scripts access.
"""
def __init__(self):
def __init__(self) -> None:
"""Initialize the Config class"""
self.debug_mode = False
self.continuous_mode = False
@@ -81,10 +66,12 @@ class Config(metaclass=Singleton):
self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
# User agent headers to use when browsing web
# Some websites might just completely deny request with an error code if no user agent was found.
# Some websites might just completely deny request with an error code if
# no user agent was found.
self.user_agent = os.getenv(
"USER_AGENT",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
)
self.redis_host = os.getenv("REDIS_HOST", "localhost")
self.redis_port = os.getenv("REDIS_PORT", "6379")
@@ -108,15 +95,17 @@ class Config(metaclass=Singleton):
The matching deployment id if found, otherwise an empty string.
"""
if model == self.fast_llm_model:
return self.azure_model_to_deployment_id_map["fast_llm_model_deployment_id"]
return self.azure_model_to_deployment_id_map[
"fast_llm_model_deployment_id"
] # type: ignore
elif model == self.smart_llm_model:
return self.azure_model_to_deployment_id_map[
"smart_llm_model_deployment_id"
]
] # type: ignore
elif model == "text-embedding-ada-002":
return self.azure_model_to_deployment_id_map[
"embedding_model_deployment_id"
]
] # type: ignore
else:
return ""
@@ -124,7 +113,8 @@ class Config(metaclass=Singleton):
def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
"""
Loads the configuration parameters for Azure hosting from the specified file path as a yaml file.
Loads the configuration parameters for Azure hosting from the specified file
path as a yaml file.
Parameters:
config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"
@@ -148,74 +138,86 @@ class Config(metaclass=Singleton):
)
self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])
def set_continuous_mode(self, value: bool):
def set_continuous_mode(self, value: bool) -> None:
"""Set the continuous mode value."""
self.continuous_mode = value
def set_continuous_limit(self, value: int):
def set_continuous_limit(self, value: int) -> None:
"""Set the continuous limit value."""
self.continuous_limit = value
def set_speak_mode(self, value: bool):
def set_speak_mode(self, value: bool) -> None:
"""Set the speak mode value."""
self.speak_mode = value
def set_fast_llm_model(self, value: str):
def set_fast_llm_model(self, value: str) -> None:
"""Set the fast LLM model value."""
self.fast_llm_model = value
def set_smart_llm_model(self, value: str):
def set_smart_llm_model(self, value: str) -> None:
"""Set the smart LLM model value."""
self.smart_llm_model = value
def set_fast_token_limit(self, value: int):
def set_fast_token_limit(self, value: int) -> None:
"""Set the fast token limit value."""
self.fast_token_limit = value
def set_smart_token_limit(self, value: int):
def set_smart_token_limit(self, value: int) -> None:
"""Set the smart token limit value."""
self.smart_token_limit = value
def set_browse_chunk_max_length(self, value: int):
def set_browse_chunk_max_length(self, value: int) -> None:
"""Set the browse_website command chunk max length value."""
self.browse_chunk_max_length = value
def set_browse_summary_max_token(self, value: int):
def set_browse_summary_max_token(self, value: int) -> None:
"""Set the browse_website command summary max token value."""
self.browse_summary_max_token = value
def set_openai_api_key(self, value: str):
def set_openai_api_key(self, value: str) -> None:
"""Set the OpenAI API key value."""
self.openai_api_key = value
def set_elevenlabs_api_key(self, value: str):
def set_elevenlabs_api_key(self, value: str) -> None:
"""Set the ElevenLabs API key value."""
self.elevenlabs_api_key = value
def set_elevenlabs_voice_1_id(self, value: str):
def set_elevenlabs_voice_1_id(self, value: str) -> None:
"""Set the ElevenLabs Voice 1 ID value."""
self.elevenlabs_voice_1_id = value
def set_elevenlabs_voice_2_id(self, value: str):
def set_elevenlabs_voice_2_id(self, value: str) -> None:
"""Set the ElevenLabs Voice 2 ID value."""
self.elevenlabs_voice_2_id = value
def set_google_api_key(self, value: str):
def set_google_api_key(self, value: str) -> None:
"""Set the Google API key value."""
self.google_api_key = value
def set_custom_search_engine_id(self, value: str):
def set_custom_search_engine_id(self, value: str) -> None:
"""Set the custom search engine id value."""
self.custom_search_engine_id = value
def set_pinecone_api_key(self, value: str):
def set_pinecone_api_key(self, value: str) -> None:
"""Set the Pinecone API key value."""
self.pinecone_api_key = value
def set_pinecone_region(self, value: str):
def set_pinecone_region(self, value: str) -> None:
"""Set the Pinecone region value."""
self.pinecone_region = value
def set_debug_mode(self, value: bool):
def set_debug_mode(self, value: bool) -> None:
"""Set the debug mode value."""
self.debug_mode = value
def check_openai_api_key() -> None:
"""Check if the OpenAI API key is set in config.py or as an environment variable."""
cfg = Config()
if not cfg.openai_api_key:
print(
Fore.RED
+ "Please set your OpenAI API key in .env or as an environment variable."
)
print("You can get your key from https://beta.openai.com/account/api-keys")
exit(1)

View File

@@ -0,0 +1,24 @@
"""The singleton metaclass for ensuring only one instance of a class."""
import abc
class Singleton(abc.ABCMeta, type):
"""
Singleton metaclass for ensuring only one instance of a class.
"""
_instances = {}
def __call__(cls, *args, **kwargs):
"""Call method for the singleton metaclass."""
if cls not in cls._instances:
cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class AbstractSingleton(abc.ABC, metaclass=Singleton):
"""
Abstract singleton class for ensuring only one instance of a class.
"""
pass
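
A quick demonstration of the Singleton contract: constructing the class twice returns the same instance. The metaclass is copied inline so the snippet runs standalone; Settings is a made-up example class.
```
import abc

class Singleton(abc.ABCMeta, type):
    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Create the instance once, then hand back the cached object.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class Settings(metaclass=Singleton):
    def __init__(self) -> None:
        self.debug_mode = False

a = Settings()
b = Settings()
a.debug_mode = True
print(a is b, b.debug_mode)  # True True
```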

View File

@@ -2,7 +2,7 @@ import argparse
import logging
from autogpt.config import Config
from autogpt.file_operations import ingest_file, search_files
from autogpt.commands.file_operations import ingest_file, search_files
from autogpt.memory import get_memory
cfg = Config()
@@ -87,7 +87,8 @@ def main() -> None:
print(f"Error while ingesting directory '{args.dir}': {str(e)}")
else:
print(
"Please provide either a file path (--file) or a directory name (--dir) inside the auto_gpt_workspace directory as input."
"Please provide either a file path (--file) or a directory name (--dir)"
" inside the auto_gpt_workspace directory as input."
)

View File

@@ -1,105 +0,0 @@
import os
import subprocess
import docker
from docker.errors import ImageNotFound
WORKSPACE_FOLDER = "auto_gpt_workspace"
def execute_python_file(file):
"""Execute a Python file in a Docker container and return the output"""
print(f"Executing file '{file}' in workspace '{WORKSPACE_FOLDER}'")
if not file.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed."
file_path = os.path.join(WORKSPACE_FOLDER, file)
if not os.path.isfile(file_path):
return f"Error: File '{file}' does not exist."
if we_are_running_in_a_docker_container():
result = subprocess.run(
f"python {file_path}", capture_output=True, encoding="utf8", shell=True
)
if result.returncode == 0:
return result.stdout
else:
return f"Error: {result.stderr}"
else:
try:
client = docker.from_env()
image_name = "python:3.10"
try:
client.images.get(image_name)
print(f"Image '{image_name}' found locally")
except ImageNotFound:
print(
f"Image '{image_name}' not found locally, pulling from Docker Hub"
)
# Use the low-level API to stream the pull response
low_level_client = docker.APIClient()
for line in low_level_client.pull(image_name, stream=True, decode=True):
# Print the status and progress, if available
status = line.get("status")
progress = line.get("progress")
if status and progress:
print(f"{status}: {progress}")
elif status:
print(status)
# You can replace 'python:3.8' with the desired Python image/version
# You can find available Python images on Docker Hub:
# https://hub.docker.com/_/python
container = client.containers.run(
image_name,
f"python {file}",
volumes={
os.path.abspath(WORKSPACE_FOLDER): {
"bind": "/workspace",
"mode": "ro",
}
},
working_dir="/workspace",
stderr=True,
stdout=True,
detach=True,
)
container.wait()
logs = container.logs().decode("utf-8")
container.remove()
# print(f"Execution complete. Output: {output}")
# print(f"Logs: {logs}")
return logs
except Exception as e:
return f"Error: {str(e)}"
def execute_shell(command_line):
current_dir = os.getcwd()
if WORKSPACE_FOLDER not in current_dir: # Change dir into workspace if necessary
work_dir = os.path.join(os.getcwd(), WORKSPACE_FOLDER)
os.chdir(work_dir)
print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
result = subprocess.run(command_line, capture_output=True, shell=True)
output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
# Change back to whatever the prior working dir was
os.chdir(current_dir)
return output
def we_are_running_in_a_docker_container():
os.path.exists("/.dockerenv")

View File

@@ -1,67 +0,0 @@
import io
import os.path
import uuid
from base64 import b64decode
import openai
import requests
from PIL import Image
from autogpt.config import Config
cfg = Config()
working_directory = "auto_gpt_workspace"
def generate_image(prompt):
filename = str(uuid.uuid4()) + ".jpg"
# DALL-E
if cfg.image_provider == "dalle":
openai.api_key = cfg.openai_api_key
response = openai.Image.create(
prompt=prompt,
n=1,
size="256x256",
response_format="b64_json",
)
print("Image Generated for prompt:" + prompt)
image_data = b64decode(response["data"][0]["b64_json"])
with open(working_directory + "/" + filename, mode="wb") as png:
png.write(image_data)
return "Saved to disk:" + filename
# STABLE DIFFUSION
elif cfg.image_provider == "sd":
API_URL = (
"https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
)
if cfg.huggingface_api_token is None:
raise ValueError(
"You need to set your Hugging Face API token in the config file."
)
headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}
response = requests.post(
API_URL,
headers=headers,
json={
"inputs": prompt,
},
)
image = Image.open(io.BytesIO(response.content))
print("Image Generated for prompt:" + prompt)
image.save(os.path.join(working_directory, filename))
return "Saved to disk:" + filename
else:
return "No Image Provider Set"

View File

View File

@@ -0,0 +1,40 @@
"""This module contains the function to fix JSON strings using GPT-3."""
import json
from autogpt.config import Config
from autogpt.llm_utils import call_ai_function
from autogpt.logs import logger
CFG = Config()
def fix_json(json_str: str, schema: str) -> str:
"""Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
# Try to fix the JSON using GPT:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [f"'''{json_str}'''", f"'''{schema}'''"]
description_string = (
"Fixes the provided JSON string to make it parseable"
" and fully compliant with the provided schema.\n If an object or"
" field specified in the schema isn't contained within the correct"
" JSON, it is omitted.\n This function is brilliant at guessing"
" when the format is incorrect."
)
# If it doesn't already start with a "`", add one:
if not json_str.startswith("`"):
json_str = "```json\n" + json_str + "\n```"
result_string = call_ai_function(
function_string, args, description_string, model=CFG.fast_llm_model
)
logger.debug("------------ JSON FIX ATTEMPT ---------------")
logger.debug(f"Original JSON: {json_str}")
logger.debug("-----------")
logger.debug(f"Fixed JSON: {result_string}")
logger.debug("----------- END OF FIX ATTEMPT ----------------")
try:
json.loads(result_string) # just check the validity
return result_string
except: # noqa: E722
# Get the call stack:
# import traceback
# call_stack = traceback.format_exc()
# print(f"Failed to fix JSON: '{json_str}' "+call_stack)
return "failed"

View File

@@ -0,0 +1,73 @@
"""Fix JSON brackets."""
import contextlib
import json
from typing import Optional
import regex
from colorama import Fore
from autogpt.logs import logger
from autogpt.config import Config
from autogpt.speech import say_text
CFG = Config()
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
if CFG.speak_mode and CFG.debug_mode:
say_text(
"I have received an invalid JSON response from the OpenAI API. "
"Trying to fix it now."
)
logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")
try:
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
json_match = json_pattern.search(json_string)
if json_match:
# Extract the valid JSON object from the string
json_string = json_match.group(0)
logger.typewriter_log(
title="Apparently json was fixed.", title_color=Fore.GREEN
)
if CFG.speak_mode and CFG.debug_mode:
say_text("Apparently json was fixed.")
else:
raise ValueError("No valid JSON object found")
except (json.JSONDecodeError, ValueError):
if CFG.debug_mode:
logger.error("Error: Invalid JSON: %s\n", json_string)
if CFG.speak_mode:
say_text("Didn't work. I will have to ignore this response then.")
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
json_string = {}
return json_string
def balance_braces(json_string: str) -> Optional[str]:
"""
Balance the braces in a JSON string.
Args:
json_string (str): The JSON string.
Returns:
str: The JSON string with braces balanced.
"""
open_braces_count = json_string.count("{")
close_braces_count = json_string.count("}")
while open_braces_count > close_braces_count:
json_string += "}"
close_braces_count += 1
while close_braces_count > open_braces_count:
json_string = json_string.rstrip("}")
close_braces_count -= 1
with contextlib.suppress(json.JSONDecodeError):
json.loads(json_string)
return json_string
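
A simplified standalone sketch of the brace-balancing idea used above: pad missing closing braces (or strip extras) until the counts match, then confirm the result parses.
```
import json

def balance_braces_sketch(json_string: str) -> str:
    opens, closes = json_string.count("{"), json_string.count("}")
    if opens > closes:
        json_string += "}" * (opens - closes)                 # pad missing closers
    elif closes > opens:
        json_string = json_string.rstrip("}") + "}" * opens   # trim extras
    json.loads(json_string)  # raises if the string is still invalid
    return json_string

print(balance_braces_sketch('{"thoughts": {"text": "hi"'))
# {"thoughts": {"text": "hi"}}
```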

View File

@@ -0,0 +1,33 @@
""" Fix invalid escape sequences in JSON strings. """
import json
from autogpt.config import Config
from autogpt.json_fixes.utilities import extract_char_position
CFG = Config()
def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
"""Fix invalid escape sequences in JSON strings.
Args:
json_to_load (str): The JSON string.
error_message (str): The error message from the JSONDecodeError
exception.
Returns:
str: The JSON string with invalid escape sequences fixed.
"""
while error_message.startswith("Invalid \\escape"):
bad_escape_location = extract_char_position(error_message)
json_to_load = (
json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :]
)
try:
json.loads(json_to_load)
return json_to_load
except json.JSONDecodeError as e:
if CFG.debug_mode:
print("json loads error - fix invalid escape", e)
error_message = str(e)
return json_to_load

View File

@@ -0,0 +1,27 @@
"""Fix quotes in a JSON string."""
import json
import re
def add_quotes_to_property_names(json_string: str) -> str:
"""
Add quotes to property names in a JSON string.
Args:
json_string (str): The JSON string.
Returns:
str: The JSON string with quotes added to property names.
"""
def replace_func(match: re.Match) -> str:
return f'"{match[1]}":'
property_name_pattern = re.compile(r"(\w+):")
corrected_json_string = property_name_pattern.sub(replace_func, json_string)
try:
json.loads(corrected_json_string)
return corrected_json_string
except json.JSONDecodeError as e:
raise e
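
A standalone demo of the property-name quoting fix: bare keys are wrapped in double quotes so the string becomes parseable JSON.
```
import json
import re

raw = '{name: "Auto-GPT", version: 1}'
# Wrap each bare key (word followed by a colon) in double quotes.
fixed = re.sub(r"(\w+):", lambda m: f'"{m[1]}":', raw)
print(fixed)              # {"name": "Auto-GPT", "version": 1}
print(json.loads(fixed))  # {'name': 'Auto-GPT', 'version': 1}
```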

View File

@@ -0,0 +1,143 @@
"""Fix and parse JSON strings."""
import contextlib
import json
from typing import Any, Dict, Union
from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json
from autogpt.json_fixes.bracket_termination import balance_braces
from autogpt.json_fixes.escaping import fix_invalid_escape
from autogpt.json_fixes.missing_quotes import add_quotes_to_property_names
from autogpt.logs import logger
CFG = Config()
JSON_SCHEMA = """
{
"command": {
"name": "command name",
"args": {
"arg name": "value"
}
},
"thoughts":
{
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user"
}
}
"""
def correct_json(json_to_load: str) -> str:
"""
Correct common JSON errors.
Args:
json_to_load (str): The JSON string.
"""
try:
if CFG.debug_mode:
print("json", json_to_load)
json.loads(json_to_load)
return json_to_load
except json.JSONDecodeError as e:
if CFG.debug_mode:
print("json loads error", e)
error_message = str(e)
if error_message.startswith("Invalid \\escape"):
json_to_load = fix_invalid_escape(json_to_load, error_message)
if error_message.startswith(
"Expecting property name enclosed in double quotes"
):
json_to_load = add_quotes_to_property_names(json_to_load)
try:
json.loads(json_to_load)
return json_to_load
except json.JSONDecodeError as e:
if CFG.debug_mode:
print("json loads error - add quotes", e)
error_message = str(e)
if balanced_str := balance_braces(json_to_load):
return balanced_str
return json_to_load
def fix_and_parse_json(
json_to_load: str, try_to_fix_with_gpt: bool = True
) -> Union[str, Dict[Any, Any]]:
"""Fix and parse JSON string
Args:
json_to_load (str): The JSON string.
try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT.
Defaults to True.
Returns:
Union[str, Dict[Any, Any]]: The parsed JSON.
"""
with contextlib.suppress(json.JSONDecodeError):
json_to_load = json_to_load.replace("\t", "")
return json.loads(json_to_load)
with contextlib.suppress(json.JSONDecodeError):
json_to_load = correct_json(json_to_load)
return json.loads(json_to_load)
# Let's do something manually:
# sometimes GPT responds with something BEFORE the braces:
# "I'm sorry, I don't understand. Please try again."
# {"text": "I'm sorry, I don't understand. Please try again.",
# "confidence": 0.0}
# So let's try to find the first brace and then parse the rest
# of the string
try:
brace_index = json_to_load.index("{")
maybe_fixed_json = json_to_load[brace_index:]
last_brace_index = maybe_fixed_json.rindex("}")
maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1]
return json.loads(maybe_fixed_json)
except (json.JSONDecodeError, ValueError) as e:
return try_ai_fix(try_to_fix_with_gpt, e, json_to_load)
def try_ai_fix(
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
) -> Union[str, Dict[Any, Any]]:
"""Try to fix the JSON with the AI
Args:
try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI.
exception (Exception): The exception that was raised.
json_to_load (str): The JSON string to load.
Raises:
exception: If try_to_fix_with_gpt is False.
Returns:
Union[str, Dict[Any, Any]]: The JSON string or dictionary.
"""
if not try_to_fix_with_gpt:
raise exception
logger.warn(
"Warning: Failed to parse AI output, attempting to fix."
"\n If you see this warning frequently, it's likely that"
" your prompt is confusing the AI. Try changing it up"
" slightly."
)
# Now try to fix this up using the ai_functions
ai_fixed_json = fix_json(json_to_load, JSON_SCHEMA)
if ai_fixed_json != "failed":
return json.loads(ai_fixed_json)
# This allows the AI to react to the error message,
# which usually results in it correcting its ways.
logger.error("Failed to fix AI output, telling the AI.")
return json_to_load
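
A sketch of the last-resort recovery in fix_and_parse_json: when the model wraps its JSON in chatter, slice from the first "{" to the last "}" and parse just that span.
```
import json

reply = 'Sure, here you go: {"command": {"name": "noop", "args": {}}} Hope that helps!'
start = reply.index("{")
end = reply.rindex("}")
print(json.loads(reply[start : end + 1]))
# {'command': {'name': 'noop', 'args': {}}}
```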

View File

@@ -0,0 +1,20 @@
"""Utilities for the json_fixes package."""
import re
def extract_char_position(error_message: str) -> int:
"""Extract the character position from the JSONDecodeError message.
Args:
error_message (str): The error message from the JSONDecodeError
exception.
Returns:
int: The character position.
"""
char_pattern = re.compile(r"\(char (\d+)\)")
if match := char_pattern.search(error_message):
return int(match[1])
else:
raise ValueError("Character position not found in the error message.")
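
A small check of the pattern extract_char_position relies on: the JSONDecodeError message embeds the offset as "(char N)", which matches the exception's pos attribute.
```
import json
import re

try:
    json.loads('{"text": "bad \\x escape"}')  # \x is not a valid JSON escape
except json.JSONDecodeError as e:
    offset = int(re.search(r"\(char (\d+)\)", str(e))[1])
    print(offset == e.pos, offset)  # True, offset of the invalid escape
```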

View File

@@ -1,113 +0,0 @@
import json
from typing import Any, Dict, Union
from autogpt.call_ai_function import call_ai_function
from autogpt.config import Config
from autogpt.json_utils import correct_json
from autogpt.logger import logger
cfg = Config()
JSON_SCHEMA = """
{
"command": {
"name": "command name",
"args": {
"arg name": "value"
}
},
"thoughts":
{
"text": "thought",
"reasoning": "reasoning",
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
"criticism": "constructive self-criticism",
"speak": "thoughts summary to say to user"
}
}
"""
def fix_and_parse_json(
json_str: str, try_to_fix_with_gpt: bool = True
) -> Union[str, Dict[Any, Any]]:
"""Fix and parse JSON string"""
try:
json_str = json_str.replace("\t", "")
return json.loads(json_str)
except json.JSONDecodeError as _: # noqa: F841
try:
json_str = correct_json(json_str)
return json.loads(json_str)
except json.JSONDecodeError as _: # noqa: F841
pass
# Let's do something manually:
# sometimes GPT responds with something BEFORE the braces:
# "I'm sorry, I don't understand. Please try again."
# {"text": "I'm sorry, I don't understand. Please try again.",
# "confidence": 0.0}
# So let's try to find the first brace and then parse the rest
# of the string
try:
brace_index = json_str.index("{")
json_str = json_str[brace_index:]
last_brace_index = json_str.rindex("}")
json_str = json_str[: last_brace_index + 1]
return json.loads(json_str)
# Can throw a ValueError if there is no "{" or "}" in the json_str
except (json.JSONDecodeError, ValueError) as e: # noqa: F841
if try_to_fix_with_gpt:
logger.warn(
"Warning: Failed to parse AI output, attempting to fix."
"\n If you see this warning frequently, it's likely that"
" your prompt is confusing the AI. Try changing it up"
" slightly."
)
# Now try to fix this up using the ai_functions
ai_fixed_json = fix_json(json_str, JSON_SCHEMA)
if ai_fixed_json != "failed":
return json.loads(ai_fixed_json)
else:
# This allows the AI to react to the error message,
# which usually results in it correcting its ways.
logger.error("Failed to fix AI output, telling the AI.")
return json_str
else:
raise e
def fix_json(json_str: str, schema: str) -> str:
"""Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
# Try to fix the JSON using GPT:
function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
args = [f"'''{json_str}'''", f"'''{schema}'''"]
description_string = (
"Fixes the provided JSON string to make it parseable"
" and fully compliant with the provided schema.\n If an object or"
" field specified in the schema isn't contained within the correct"
" JSON, it is omitted.\n This function is brilliant at guessing"
" when the format is incorrect."
)
# If it doesn't already start with a "`", add one:
if not json_str.startswith("`"):
json_str = "```json\n" + json_str + "\n```"
result_string = call_ai_function(
function_string, args, description_string, model=cfg.fast_llm_model
)
logger.debug("------------ JSON FIX ATTEMPT ---------------")
logger.debug(f"Original JSON: {json_str}")
logger.debug("-----------")
logger.debug(f"Fixed JSON: {result_string}")
logger.debug("----------- END OF FIX ATTEMPT ----------------")
try:
json.loads(result_string) # just check the validity
return result_string
except: # noqa: E722
# Get the call stack:
# import traceback
# call_stack = traceback.format_exc()
# print(f"Failed to fix JSON: '{json_str}' "+call_stack)
return "failed"

View File

@@ -1,128 +0,0 @@
import json
import re
from typing import Optional
from autogpt.config import Config
cfg = Config()
def extract_char_position(error_message: str) -> int:
"""Extract the character position from the JSONDecodeError message.
Args:
error_message (str): The error message from the JSONDecodeError
exception.
Returns:
int: The character position.
"""
import re
char_pattern = re.compile(r"\(char (\d+)\)")
if match := char_pattern.search(error_message):
return int(match[1])
else:
raise ValueError("Character position not found in the error message.")
def add_quotes_to_property_names(json_string: str) -> str:
"""
Add quotes to property names in a JSON string.
Args:
json_string (str): The JSON string.
Returns:
str: The JSON string with quotes added to property names.
"""
def replace_func(match):
return f'"{match.group(1)}":'
property_name_pattern = re.compile(r"(\w+):")
corrected_json_string = property_name_pattern.sub(replace_func, json_string)
try:
json.loads(corrected_json_string)
return corrected_json_string
except json.JSONDecodeError as e:
raise e
def balance_braces(json_string: str) -> Optional[str]:
"""
Balance the braces in a JSON string.
Args:
json_string (str): The JSON string.
Returns:
str: The JSON string with braces balanced.
"""
open_braces_count = json_string.count("{")
close_braces_count = json_string.count("}")
while open_braces_count > close_braces_count:
json_string += "}"
close_braces_count += 1
while close_braces_count > open_braces_count:
json_string = json_string.rstrip("}")
close_braces_count -= 1
try:
json.loads(json_string)
return json_string
except json.JSONDecodeError:
pass
def fix_invalid_escape(json_str: str, error_message: str) -> str:
while error_message.startswith("Invalid \\escape"):
bad_escape_location = extract_char_position(error_message)
json_str = json_str[:bad_escape_location] + json_str[bad_escape_location + 1 :]
try:
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
if cfg.debug_mode:
print("json loads error - fix invalid escape", e)
error_message = str(e)
return json_str
def correct_json(json_str: str) -> str:
"""
Correct common JSON errors.
Args:
json_str (str): The JSON string.
"""
try:
if cfg.debug_mode:
print("json", json_str)
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
if cfg.debug_mode:
print("json loads error", e)
error_message = str(e)
if error_message.startswith("Invalid \\escape"):
json_str = fix_invalid_escape(json_str, error_message)
if error_message.startswith(
"Expecting property name enclosed in double quotes"
):
json_str = add_quotes_to_property_names(json_str)
try:
json.loads(json_str)
return json_str
except json.JSONDecodeError as e:
if cfg.debug_mode:
print("json loads error - add quotes", e)
error_message = str(e)
if balanced_str := balance_braces(json_str):
return balanced_str
return json_str

View File

@@ -1,4 +1,6 @@
import time
from typing import Dict, List, Optional
import openai
from openai.error import APIError, RateLimitError
@@ -6,30 +8,79 @@ from colorama import Fore
from autogpt.config import Config
cfg = Config()
CFG = Config()
openai.api_key = cfg.openai_api_key
openai.api_key = CFG.openai_api_key
def call_ai_function(
function: str, args: List, description: str, model: Optional[str] = None
) -> str:
"""Call an AI function
This is a magic function that can do anything with no-code. See
https://github.com/Torantulino/AI-Functions for more info.
Args:
function (str): The function to call
args (list): The arguments to pass to the function
description (str): The description of the function
model (str, optional): The model to use. Defaults to None.
Returns:
str: The response from the function
"""
if model is None:
model = CFG.smart_llm_model
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
args = ", ".join(args)
messages = [
{
"role": "system",
"content": f"You are now the following python function: ```# {description}"
f"\n{function}```\n\nOnly respond with your `return` value.",
},
{"role": "user", "content": args},
]
return create_chat_completion(model=model, messages=messages, temperature=0)
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
messages, model=None, temperature=cfg.temperature, max_tokens=None
messages: List, # type: ignore
model: Optional[str] = None,
temperature: float = CFG.temperature,
max_tokens: Optional[int] = None,
) -> str:
"""Create a chat completion using the OpenAI API"""
"""Create a chat completion using the OpenAI API
Args:
messages (list[dict[str, str]]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.9.
max_tokens (int, optional): The max tokens to use. Defaults to None.
Returns:
str: The response from the chat completion
"""
response = None
num_retries = 5
if cfg.debug_mode:
num_retries = 10
if CFG.debug_mode:
print(
Fore.GREEN
+ f"Creating chat completion with model {model}, temperature {temperature},"
f" max_tokens {max_tokens}" + Fore.RESET
)
for attempt in range(num_retries):
backoff = 2 ** (attempt + 2)
try:
if cfg.use_azure:
if CFG.use_azure:
response = openai.ChatCompletion.create(
deployment_id=cfg.get_azure_deployment_id_for_model(model),
deployment_id=CFG.get_azure_deployment_id_for_model(model),
model=model,
messages=messages,
temperature=temperature,
@@ -44,26 +95,21 @@ def create_chat_completion(
)
break
except RateLimitError:
if cfg.debug_mode:
print(
Fore.RED + "Error: ",
"API Rate Limit Reached. Waiting 20 seconds..." + Fore.RESET,
)
time.sleep(20)
pass
except APIError as e:
if e.http_status == 502:
if cfg.debug_mode:
print(
Fore.RED + "Error: ",
"API Bad gateway. Waiting 20 seconds..." + Fore.RESET,
)
time.sleep(20)
pass
else:
raise
if attempt == num_retries - 1:
raise
if CFG.debug_mode:
print(
Fore.RED + "Error: ",
f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
)
time.sleep(backoff)
if response is None:
raise RuntimeError("Failed to get response after 5 retries")
raise RuntimeError(f"Failed to get response after {num_retries} retries")
return response.choices[0].message["content"]
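
A generic sketch of the retry pattern above (not the package's API): exponential backoff of 4s, 8s, 16s, ... between attempts, re-raising once the retries are exhausted. TimeoutError stands in for RateLimitError or a 502 APIError.
```
import time

def call_with_backoff(request, num_retries: int = 10):
    """Call request(), sleeping 2 ** (attempt + 2) seconds after each failure."""
    for attempt in range(num_retries):
        try:
            return request()
        except TimeoutError:
            if attempt == num_retries - 1:
                raise  # out of retries, surface the error to the caller
            time.sleep(2 ** (attempt + 2))
```
Note that with ten retries the later waits grow past fifteen minutes, so a lower retry count or a cap on the backoff may be preferable in practice.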

View File

@@ -1,25 +1,28 @@
"""Logging module for Auto-GPT."""
import json
import logging
import os
import random
import re
import time
from logging import LogRecord
import traceback
from colorama import Fore, Style
from autogpt import speak
from autogpt.speech import say_text
from autogpt.config import Config, Singleton
cfg = Config()
"""
Logger that handle titles in different colors.
Outputs logs in console, activity.log, and errors.log
For console handler: simulates typing
"""
CFG = Config()
class Logger(metaclass=Singleton):
"""
Logger that handle titles in different colors.
Outputs logs in console, activity.log, and errors.log
For console handler: simulates typing
"""
def __init__(self):
# create log directory if it doesn't exist
this_files_dir_path = os.path.dirname(__file__)
@@ -74,8 +77,8 @@ class Logger(metaclass=Singleton):
def typewriter_log(
self, title="", title_color="", content="", speak_text=False, level=logging.INFO
):
if speak_text and cfg.speak_mode:
speak.say_text(f"{title}. {content}")
if speak_text and CFG.speak_mode:
say_text(f"{title}. {content}")
if content:
if isinstance(content, list):
@@ -193,3 +196,93 @@ def remove_color_codes(s: str) -> str:
logger = Logger()
def print_assistant_thoughts(ai_name, assistant_reply):
"""Prints the assistant's thoughts to the console"""
from autogpt.json_fixes.bracket_termination import (
attempt_to_fix_json_by_finding_outermost_brackets,
)
from autogpt.json_fixes.parsing import fix_and_parse_json
try:
try:
# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
except json.JSONDecodeError:
logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply
)
if isinstance(assistant_reply_json, str):
assistant_reply_json = fix_and_parse_json(assistant_reply_json)
# Check if assistant_reply_json is a string and attempt to parse
# it into a JSON object
if isinstance(assistant_reply_json, str):
try:
assistant_reply_json = json.loads(assistant_reply_json)
except json.JSONDecodeError:
logger.error("Error: Invalid JSON\n", assistant_reply)
assistant_reply_json = (
attempt_to_fix_json_by_finding_outermost_brackets(
assistant_reply_json
)
)
assistant_thoughts_reasoning = None
assistant_thoughts_plan = None
assistant_thoughts_speak = None
assistant_thoughts_criticism = None
if not isinstance(assistant_reply_json, dict):
assistant_reply_json = {}
assistant_thoughts = assistant_reply_json.get("thoughts", {})
assistant_thoughts_text = assistant_thoughts.get("text")
if assistant_thoughts:
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
assistant_thoughts_plan = assistant_thoughts.get("plan")
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
assistant_thoughts_speak = assistant_thoughts.get("speak")
logger.typewriter_log(
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
)
logger.typewriter_log(
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
)
if assistant_thoughts_plan:
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
# If it's a list, join it into a string
if isinstance(assistant_thoughts_plan, list):
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
elif isinstance(assistant_thoughts_plan, dict):
assistant_thoughts_plan = str(assistant_thoughts_plan)
# Split the input_string using the newline character and dashes
lines = assistant_thoughts_plan.split("\n")
for line in lines:
line = line.lstrip("- ")
logger.typewriter_log("- ", Fore.GREEN, line.strip())
logger.typewriter_log(
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
)
# Speak the assistant's thoughts
if CFG.speak_mode and assistant_thoughts_speak:
say_text(assistant_thoughts_speak)
return assistant_reply_json
except json.decoder.JSONDecodeError:
logger.error("Error: Invalid JSON\n", assistant_reply)
if CFG.speak_mode:
say_text(
"I have received an invalid JSON response from the OpenAI API."
" I cannot ignore this response."
)
# All other errors, return "Error: + error message"
except Exception:
call_stack = traceback.format_exc()
logger.error("Error: \n", call_stack)

View File

@@ -1,6 +1,6 @@
import dataclasses
import os
from typing import Any, List, Optional
from typing import Any, List, Optional, Tuple
import numpy as np
import orjson
@@ -24,8 +24,17 @@ class CacheContent:
class LocalCache(MemoryProviderSingleton):
# on load, load our database
"""A class that stores the memory in a local file"""
def __init__(self, cfg) -> None:
"""Initialize a class instance
Args:
cfg: Config object
Returns:
None
"""
self.filename = f"{cfg.memory_index}.json"
if os.path.exists(self.filename):
try:
@@ -42,7 +51,8 @@ class LocalCache(MemoryProviderSingleton):
self.data = CacheContent()
else:
print(
f"Warning: The file '{self.filename}' does not exist. Local memory would not be saved to a file."
f"Warning: The file '{self.filename}' does not exist."
"Local memory would not be saved to a file."
)
self.data = CacheContent()
@@ -116,7 +126,7 @@ class LocalCache(MemoryProviderSingleton):
return [self.data.texts[i] for i in top_k_indices]
def get_stats(self):
def get_stats(self) -> Tuple[int, Tuple[int, ...]]:
"""
Returns: The stats of the local cache.
"""

View File

@@ -1,9 +1,14 @@
"""A class that does not store any data. This is the default memory provider."""
from typing import Optional, List, Any
from autogpt.memory.base import MemoryProviderSingleton
class NoMemory(MemoryProviderSingleton):
"""
A class that does not store any data. This is the default memory provider.
"""
def __init__(self, cfg):
"""
Initializes the NoMemory provider.

View File

@@ -1,7 +1,7 @@
import pinecone
from colorama import Fore, Style
from autogpt.logger import logger
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
@@ -16,7 +16,8 @@ class PineconeMemory(MemoryProviderSingleton):
table_name = "auto-gpt"
# this assumes we don't start with memory.
# for now this works.
# we'll need a more complicated and robust system if we want to start with memory.
# we'll need a more complicated and robust system if we want to start with
# memory.
self.vec_num = 0
try:
@@ -28,8 +29,10 @@ class PineconeMemory(MemoryProviderSingleton):
Style.BRIGHT + str(e) + Style.RESET_ALL,
)
logger.double_check(
"Please ensure you have setup and configured Pinecone properly for use. "
+ f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup{Style.RESET_ALL} to ensure you've set up everything correctly."
"Please ensure you have setup and configured Pinecone properly for use."
+ f"You can check out {Fore.CYAN + Style.BRIGHT}"
"https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup"
f"{Style.RESET_ALL} to ensure you've set up everything correctly."
)
exit(1)
@@ -42,7 +45,7 @@ class PineconeMemory(MemoryProviderSingleton):
def add(self, data):
vector = get_ada_embedding(data)
# no metadata here. We may wish to change that long term.
resp = self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
_text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
self.vec_num += 1
return _text

View File

@@ -8,7 +8,7 @@ from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
from autogpt.logger import logger
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
SCHEMA = [
@@ -54,7 +54,9 @@ class RedisMemory(MemoryProviderSingleton):
)
logger.double_check(
"Please ensure you have setup and configured Redis properly for use. "
+ f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL} to ensure you've set up everything correctly."
+ f"You can check out {Fore.CYAN + Style.BRIGHT}"
f"https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL}"
" to ensure you've set up everything correctly."
)
exit(1)

View File

autogpt/processing/text.py Normal file
View File

@@ -0,0 +1,132 @@
"""Text processing functions"""
from typing import Generator, Optional
from selenium.webdriver.remote.webdriver import WebDriver
from autogpt.memory import get_memory
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
CFG = Config()
MEMORY = get_memory(CFG)
def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
"""Split text into chunks of a maximum length
Args:
text (str): The text to split
max_length (int, optional): The maximum length of each chunk. Defaults to 8192.
Yields:
str: The next chunk of text
Raises:
ValueError: If the text is longer than the maximum length
"""
paragraphs = text.split("\n")
current_length = 0
current_chunk = []
for paragraph in paragraphs:
if current_length + len(paragraph) + 1 <= max_length:
current_chunk.append(paragraph)
current_length += len(paragraph) + 1
else:
yield "\n".join(current_chunk)
current_chunk = [paragraph]
current_length = len(paragraph) + 1
if current_chunk:
yield "\n".join(current_chunk)
def summarize_text(
url: str, text: str, question: str, driver: Optional[WebDriver] = None
) -> str:
"""Summarize text using the OpenAI API
Args:
url (str): The url of the text
text (str): The text to summarize
question (str): The question to ask the model
driver (WebDriver): The webdriver to use to scroll the page
Returns:
str: The summary of the text
"""
if not text:
return "Error: No text to summarize"
text_length = len(text)
print(f"Text length: {text_length} characters")
summaries = []
chunks = list(split_text(text))
scroll_ratio = 1 / len(chunks)
for i, chunk in enumerate(chunks):
if driver:
scroll_to_percentage(driver, scroll_ratio * i)
print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
MEMORY.add(memory_to_add)
print(f"Summarizing chunk {i + 1} / {len(chunks)}")
messages = [create_message(chunk, question)]
summary = create_chat_completion(
model=CFG.fast_llm_model,
messages=messages,
max_tokens=CFG.browse_summary_max_token,
)
summaries.append(summary)
print(f"Added chunk {i + 1} summary to memory")
memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
MEMORY.add(memory_to_add)
print(f"Summarized {len(chunks)} chunks.")
combined_summary = "\n".join(summaries)
messages = [create_message(combined_summary, question)]
return create_chat_completion(
model=CFG.fast_llm_model,
messages=messages,
max_tokens=CFG.browse_summary_max_token,
)
def scroll_to_percentage(driver: WebDriver, ratio: float) -> None:
"""Scroll to a percentage of the page
Args:
driver (WebDriver): The webdriver to use
ratio (float): The percentage to scroll to
Raises:
ValueError: If the ratio is not between 0 and 1
"""
if ratio < 0 or ratio > 1:
raise ValueError("Percentage should be between 0 and 1")
driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
def create_message(chunk: str, question: str) -> dict[str, str]:
"""Create a message for the chat completion
Args:
chunk (str): The chunk of text to summarize
question (str): The question to answer
Returns:
dict[str, str]: The message to send to the chat completion
"""
return {
"role": "user",
"content": f'"""{chunk}""" Using the above text, please answer the following'
f' question: "{question}" -- if the question cannot be answered using the text,'
" please summarize the text.",
}
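
A standalone mirror of split_text above (duplicated here so the check runs without the package's Selenium and memory imports), packing paragraphs into chunks that stay under max_length:
```
from typing import Generator

def split_text(text: str, max_length: int = 8192) -> Generator[str, None, None]:
    paragraphs = text.split("\n")
    current_length = 0
    current_chunk = []
    for paragraph in paragraphs:
        if current_length + len(paragraph) + 1 <= max_length:
            current_chunk.append(paragraph)
            current_length += len(paragraph) + 1
        else:
            yield "\n".join(current_chunk)
            current_chunk = [paragraph]
            current_length = len(paragraph) + 1
    if current_chunk:
        yield "\n".join(current_chunk)

text = "\n".join(f"paragraph {i}: " + "x" * 40 for i in range(6))
chunks = list(split_text(text, max_length=120))
print(len(chunks), all(len(c) <= 120 for c in chunks))  # 3 True
```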

View File

@@ -1,4 +1,12 @@
from colorama import Fore
from autogpt.config.ai_config import AIConfig
from autogpt.config.config import Config
from autogpt.logs import logger
from autogpt.promptgenerator import PromptGenerator
from autogpt.setup import prompt_user
from autogpt.utils import clean_input
CFG = Config()
def get_prompt() -> str:
@@ -106,3 +114,42 @@ def get_prompt() -> str:
# Generate the prompt string
return prompt_generator.generate_prompt_string()
def construct_prompt() -> str:
"""Construct the prompt for the AI to respond to
Returns:
str: The prompt string
"""
config = AIConfig.load(CFG.ai_settings_file)
if CFG.skip_reprompt and config.ai_name:
logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
elif config.ai_name:
logger.typewriter_log(
"Welcome back! ",
Fore.GREEN,
f"Would you like me to return to being {config.ai_name}?",
speak_text=True,
)
should_continue = clean_input(
f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
Continue (y/n): """
)
if should_continue.lower() == "n":
config = AIConfig()
if not config.ai_name:
config = prompt_user()
config.save()
# Get rid of this global:
global ai_name
ai_name = config.ai_name
return config.construct_full_prompt()

View File

@@ -1,4 +1,6 @@
""" A module for generating custom prompt strings."""
import json
from typing import Any, Dict, List
class PromptGenerator:
@@ -7,7 +9,7 @@ class PromptGenerator:
resources, and performance evaluations.
"""
def __init__(self):
def __init__(self) -> None:
"""
Initialize the PromptGenerator object with empty lists of constraints,
commands, resources, and performance evaluations.
@@ -27,7 +29,7 @@ class PromptGenerator:
"command": {"name": "command name", "args": {"arg name": "value"}},
}
def add_constraint(self, constraint):
def add_constraint(self, constraint: str) -> None:
"""
Add a constraint to the constraints list.
@@ -36,7 +38,7 @@ class PromptGenerator:
"""
self.constraints.append(constraint)
def add_command(self, command_label, command_name, args=None):
def add_command(self, command_label: str, command_name: str, args=None) -> None:
"""
Add a command to the commands list with a label, name, and optional arguments.
@@ -59,7 +61,7 @@ class PromptGenerator:
self.commands.append(command)
def _generate_command_string(self, command):
def _generate_command_string(self, command: Dict[str, Any]) -> str:
"""
Generate a formatted string representation of a command.
@@ -92,7 +94,7 @@ class PromptGenerator:
"""
self.performance_evaluation.append(evaluation)
def _generate_numbered_list(self, items, item_type="list") -> str:
def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
"""
Generate a numbered list from given items based on the item_type.

autogpt/setup.py Normal file
View File

@@ -0,0 +1,69 @@
"""Setup the AI and its goals"""
from colorama import Fore, Style
from autogpt import utils
from autogpt.config.ai_config import AIConfig
from autogpt.logs import logger
def prompt_user() -> AIConfig:
"""Prompt the user for input
Returns:
AIConfig: The AIConfig object containing the user's input
"""
ai_name = ""
# Construct the prompt
logger.typewriter_log(
"Welcome to Auto-GPT! ",
Fore.GREEN,
"Enter the name of your AI and its role below. Entering nothing will load"
" defaults.",
speak_text=True,
)
# Get AI Name from User
logger.typewriter_log(
"Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
)
ai_name = utils.clean_input("AI Name: ")
if ai_name == "":
ai_name = "Entrepreneur-GPT"
logger.typewriter_log(
f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
)
# Get AI Role from User
logger.typewriter_log(
"Describe your AI's role: ",
Fore.GREEN,
"For example, 'an AI designed to autonomously develop and run businesses with"
" the sole goal of increasing your net worth.'",
)
ai_role = utils.clean_input(f"{ai_name} is: ")
if ai_role == "":
ai_role = ("an AI designed to autonomously develop and run businesses with the"
" sole goal of increasing your net worth.")
# Enter up to 5 goals for the AI
logger.typewriter_log(
"Enter up to 5 goals for your AI: ",
Fore.GREEN,
"For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
" multiple businesses autonomously'",
)
print("Enter nothing to load defaults; enter nothing again when finished.", flush=True)
ai_goals = []
for i in range(5):
ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
if ai_goal == "":
break
ai_goals.append(ai_goal)
if not ai_goals:
ai_goals = [
"Increase net worth",
"Grow Twitter Account",
"Develop and manage multiple businesses autonomously",
]
return AIConfig(ai_name, ai_role, ai_goals)
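A short usage sketch of prompt_user() as it is wired into construct_prompt() above; the print call is illustrative:

# Illustrative: interactively build and persist a new AI configuration.
config = prompt_user()
config.save()
print(config.ai_name, config.ai_role, config.ai_goals)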

View File

@@ -1,120 +0,0 @@
import os
import requests
from playsound import playsound
from autogpt.config import Config
import threading
from threading import Lock, Semaphore
import gtts
cfg = Config()
# Default voice IDs
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
# Retrieve custom voice IDs from the Config class
custom_voice_1 = cfg.elevenlabs_voice_1_id
custom_voice_2 = cfg.elevenlabs_voice_2_id
# Placeholder values that should be treated as empty
placeholders = {"your-voice-id"}
# Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs
voices = [
custom_voice_1
if custom_voice_1 and custom_voice_1 not in placeholders
else default_voices[0],
custom_voice_2
if custom_voice_2 and custom_voice_2 not in placeholders
else default_voices[1],
]
tts_headers = {"Content-Type": "application/json", "xi-api-key": cfg.elevenlabs_api_key}
mutex_lock = Lock() # Ensure only one sound is played at a time
queue_semaphore = Semaphore(
1
) # The amount of sounds to queue before blocking the main thread
def eleven_labs_speech(text, voice_index=0):
"""Speak text using elevenlabs.io's API"""
tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
voice_id=voices[voice_index]
)
formatted_message = {"text": text}
response = requests.post(tts_url, headers=tts_headers, json=formatted_message)
if response.status_code == 200:
with mutex_lock:
with open("speech.mpeg", "wb") as f:
f.write(response.content)
playsound("speech.mpeg", True)
os.remove("speech.mpeg")
return True
else:
print("Request failed with status code:", response.status_code)
print("Response content:", response.content)
return False
def brian_speech(text):
"""Speak text using Brian with the streamelements API"""
tts_url = f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}"
response = requests.get(tts_url)
if response.status_code == 200:
with mutex_lock:
with open("speech.mp3", "wb") as f:
f.write(response.content)
playsound("speech.mp3")
os.remove("speech.mp3")
return True
else:
print("Request failed with status code:", response.status_code)
print("Response content:", response.content)
return False
def gtts_speech(text):
tts = gtts.gTTS(text)
with mutex_lock:
tts.save("speech.mp3")
playsound("speech.mp3", True)
os.remove("speech.mp3")
def macos_tts_speech(text, voice_index=0):
if voice_index == 0:
os.system(f'say "{text}"')
else:
if voice_index == 1:
os.system(f'say -v "Ava (Premium)" "{text}"')
else:
os.system(f'say -v Samantha "{text}"')
def say_text(text, voice_index=0):
def speak():
if not cfg.elevenlabs_api_key:
if cfg.use_mac_os_tts == "True":
macos_tts_speech(text)
elif cfg.use_brian_tts == "True":
success = brian_speech(text)
if not success:
gtts_speech(text)
else:
gtts_speech(text)
else:
success = eleven_labs_speech(text, voice_index)
if not success:
gtts_speech(text)
queue_semaphore.release()
queue_semaphore.acquire(True)
thread = threading.Thread(target=speak)
thread.start()

View File

@@ -0,0 +1,4 @@
"""This module contains the speech recognition and speech synthesis functions."""
from autogpt.speech.say import say_text
__all__ = ["say_text"]

50
autogpt/speech/base.py Normal file
View File

@@ -0,0 +1,50 @@
"""Base class for all voice classes."""
import abc
from threading import Lock
from autogpt.config import AbstractSingleton
class VoiceBase(AbstractSingleton):
"""
Base class for all voice classes.
"""
def __init__(self):
"""
Initialize the voice class.
"""
self._url = None
self._headers = None
self._api_key = None
self._voices = []
self._mutex = Lock()
self._setup()
def say(self, text: str, voice_index: int = 0) -> bool:
"""
Say the given text.
Args:
text (str): The text to say.
voice_index (int): The index of the voice to use.
"""
with self._mutex:
return self._speech(text, voice_index)
@abc.abstractmethod
def _setup(self) -> None:
"""
Setup the voices, API key, etc.
"""
pass
@abc.abstractmethod
def _speech(self, text: str, voice_index: int = 0) -> bool:
"""
Play the given text.
Args:
text (str): The text to play.
"""
pass
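To show how the abstract base is meant to be extended, a minimal sketch; the class below is hypothetical and not part of this commit:

from autogpt.speech.base import VoiceBase


class PrintVoice(VoiceBase):
    """Hypothetical voice that prints text instead of playing audio."""

    def _setup(self) -> None:
        pass

    def _speech(self, text: str, voice_index: int = 0) -> bool:
        # say() already holds the base-class mutex when this is called.
        print(f"[voice {voice_index}] {text}")
        return True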

39
autogpt/speech/brian.py Normal file
View File

@@ -0,0 +1,39 @@
""" Brian speech module for autogpt """
import os
import requests
from playsound import playsound
from autogpt.speech.base import VoiceBase
class BrianSpeech(VoiceBase):
"""Brian speech module for autogpt"""
def _setup(self) -> None:
"""Setup the voices, API key, etc."""
pass
def _speech(self, text: str, _: int = 0) -> bool:
"""Speak text using Brian with the streamelements API
Args:
text (str): The text to speak
Returns:
bool: True if the request was successful, False otherwise
"""
tts_url = (
f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}"
)
response = requests.get(tts_url)
if response.status_code == 200:
with open("speech.mp3", "wb") as f:
f.write(response.content)
playsound("speech.mp3")
os.remove("speech.mp3")
return True
else:
print("Request failed with status code:", response.status_code)
print("Response content:", response.content)
return False

View File

@@ -0,0 +1,71 @@
"""ElevenLabs speech module"""
import os
from playsound import playsound
import requests
from autogpt.config import Config
from autogpt.speech.base import VoiceBase
PLACEHOLDERS = {"your-voice-id"}
class ElevenLabsSpeech(VoiceBase):
"""ElevenLabs speech class"""
def _setup(self) -> None:
"""Setup the voices, API key, etc.
Returns:
None: None
"""
cfg = Config()
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
self._headers = {
"Content-Type": "application/json",
"xi-api-key": cfg.elevenlabs_api_key,
}
self._voices = default_voices.copy()
self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0)
self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)
def _use_custom_voice(self, voice, voice_index) -> None:
"""Use a custom voice if provided and not a placeholder
Args:
voice (str): The voice ID
voice_index (int): The voice index
Returns:
None: None
"""
# Placeholder values that should be treated as empty
if voice and voice not in PLACEHOLDERS:
self._voices[voice_index] = voice
def _speech(self, text: str, voice_index: int = 0) -> bool:
"""Speak text using elevenlabs.io's API
Args:
text (str): The text to speak
voice_index (int, optional): The voice to use. Defaults to 0.
Returns:
bool: True if the request was successful, False otherwise
"""
tts_url = (
f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
)
response = requests.post(tts_url, headers=self._headers, json={"text": text})
if response.status_code == 200:
with open("speech.mpeg", "wb") as f:
f.write(response.content)
playsound("speech.mpeg", True)
os.remove("speech.mpeg")
return True
else:
print("Request failed with status code:", response.status_code)
print("Response content:", response.content)
return False

21
autogpt/speech/gtts.py Normal file
View File

@@ -0,0 +1,21 @@
""" GTTS Voice. """
import os
from playsound import playsound
import gtts
from autogpt.speech.base import VoiceBase
class GTTSVoice(VoiceBase):
"""GTTS Voice."""
def _setup(self) -> None:
pass
def _speech(self, text: str, _: int = 0) -> bool:
"""Play the given text."""
tts = gtts.gTTS(text)
tts.save("speech.mp3")
playsound("speech.mp3", True)
os.remove("speech.mp3")
return True

View File

@@ -0,0 +1,21 @@
""" MacOS TTS Voice. """
import os
from autogpt.speech.base import VoiceBase
class MacOSTTS(VoiceBase):
"""MacOS TTS Voice."""
def _setup(self) -> None:
pass
def _speech(self, text: str, voice_index: int = 0) -> bool:
"""Play the given text."""
if voice_index == 0:
os.system(f'say "{text}"')
elif voice_index == 1:
os.system(f'say -v "Ava (Premium)" "{text}"')
else:
os.system(f'say -v Samantha "{text}"')
return True

42
autogpt/speech/say.py Normal file
View File

@@ -0,0 +1,42 @@
""" Text to speech module """
from autogpt.config import Config
import threading
from threading import Semaphore
from autogpt.speech.brian import BrianSpeech
from autogpt.speech.macos_tts import MacOSTTS
from autogpt.speech.gtts import GTTSVoice
from autogpt.speech.eleven_labs import ElevenLabsSpeech
CFG = Config()
DEFAULT_VOICE_ENGINE = GTTSVoice()
VOICE_ENGINE = None
if CFG.elevenlabs_api_key:
VOICE_ENGINE = ElevenLabsSpeech()
elif CFG.use_mac_os_tts == "True":
VOICE_ENGINE = MacOSTTS()
elif CFG.use_brian_tts == "True":
VOICE_ENGINE = BrianSpeech()
else:
VOICE_ENGINE = GTTSVoice()
QUEUE_SEMAPHORE = Semaphore(
1
) # The amount of sounds to queue before blocking the main thread
def say_text(text: str, voice_index: int = 0) -> None:
"""Speak the given text using the given voice index"""
def speak() -> None:
success = VOICE_ENGINE.say(text, voice_index)
if not success:
DEFAULT_VOICE_ENGINE.say(text)
QUEUE_SEMAPHORE.release()
QUEUE_SEMAPHORE.acquire(True)
thread = threading.Thread(target=speak)
thread.start()
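Usage is a single call; engine selection happens once at import time based on Config, and playback runs on a background thread. A minimal sketch:

from autogpt.speech import say_text

# Speaks asynchronously; falls back to the gTTS engine if the selected engine fails.
say_text("Task complete.", voice_index=0)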

View File

@@ -1,3 +1,4 @@
"""A simple spinner module"""
import itertools
import sys
import threading
@@ -7,15 +8,20 @@ import time
class Spinner:
"""A simple spinner class"""
def __init__(self, message="Loading...", delay=0.1):
"""Initialize the spinner class"""
def __init__(self, message: str = "Loading...", delay: float = 0.1) -> None:
"""Initialize the spinner class
Args:
message (str): The message to display.
delay (float): The delay between each spinner update.
"""
self.spinner = itertools.cycle(["-", "/", "|", "\\"])
self.delay = delay
self.message = message
self.running = False
self.spinner_thread = None
def spin(self):
def spin(self) -> None:
"""Spin the spinner"""
while self.running:
sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
@@ -23,14 +29,20 @@ class Spinner:
time.sleep(self.delay)
sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
def __enter__(self):
def __enter__(self) -> None:
"""Start the spinner"""
self.running = True
self.spinner_thread = threading.Thread(target=self.spin)
self.spinner_thread.start()
def __exit__(self, exc_type, exc_value, exc_traceback):
"""Stop the spinner"""
def __exit__(self, exc_type, exc_value, exc_traceback) -> None:
"""Stop the spinner
Args:
exc_type (Exception): The exception type.
exc_value (Exception): The exception value.
exc_traceback (Exception): The exception traceback.
"""
self.running = False
if self.spinner_thread is not None:
self.spinner_thread.join()
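The spinner is used as a context manager; a minimal sketch, with the sleep standing in for any long-running call:

import time

from autogpt.spinner import Spinner

# Shows the spinner on stdout until the with-block exits.
with Spinner("Thinking... "):
    time.sleep(2)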

View File

@@ -1,69 +0,0 @@
from autogpt.llm_utils import create_chat_completion
def summarize_text(driver, text, question):
if not text:
return "Error: No text to summarize"
text_length = len(text)
print(f"Text length: {text_length} characters")
summaries = []
chunks = list(split_text(text))
scroll_ratio = 1 / len(chunks)
for i, chunk in enumerate(chunks):
scroll_to_percentage(driver, scroll_ratio * i)
print(f"Summarizing chunk {i + 1} / {len(chunks)}")
messages = [create_message(chunk, question)]
summary = create_chat_completion(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=300,
)
summaries.append(summary)
print(f"Summarized {len(chunks)} chunks.")
combined_summary = "\n".join(summaries)
messages = [create_message(combined_summary, question)]
return create_chat_completion(
model="gpt-3.5-turbo",
messages=messages,
max_tokens=300,
)
def split_text(text, max_length=8192):
paragraphs = text.split("\n")
current_length = 0
current_chunk = []
for paragraph in paragraphs:
if current_length + len(paragraph) + 1 <= max_length:
current_chunk.append(paragraph)
current_length += len(paragraph) + 1
else:
yield "\n".join(current_chunk)
current_chunk = [paragraph]
current_length = len(paragraph) + 1
if current_chunk:
yield "\n".join(current_chunk)
def create_message(chunk, question):
return {
"role": "user",
"content": f'"""{chunk}""" Using the above text, please answer the following'
f' question: "{question}" -- if the question cannot be answered using the text,'
" please summarize the text.",
}
def scroll_to_percentage(driver, ratio):
if ratio < 0 or ratio > 1:
raise ValueError("Percentage should be between 0 and 1")
driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
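For clarity on the chunking behaviour of the split_text() helper in the file removed above: paragraphs are grouped until adding the next one would exceed max_length, and an oversized paragraph still ends up as its own chunk. A small worked example:

# Worked example: three paragraphs, the middle one longer than max_length.
text = "short intro\n" + "x" * 9000 + "\nshort outro"
chunks = list(split_text(text, max_length=8192))
# -> 3 chunks: the long paragraph forces a break before and after itself.
print(len(chunks))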

View File

@@ -1,8 +1,9 @@
"""Functions for counting the number of tokens in a message or string."""
from typing import Dict, List
import tiktoken
from autogpt.logger import logger
from autogpt.logs import logger
def count_message_tokens(

View File

@@ -1,85 +0,0 @@
from selenium import webdriver
import autogpt.summary as summary
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import logging
from pathlib import Path
from autogpt.config import Config
file_dir = Path(__file__).parent
cfg = Config()
def browse_website(url, question):
driver, text = scrape_text_with_selenium(url)
add_header(driver)
summary_text = summary.summarize_text(driver, text, question)
links = scrape_links_with_selenium(driver)
# Limit links to 5
if len(links) > 5:
links = links[:5]
close_browser(driver)
return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver
def scrape_text_with_selenium(url):
logging.getLogger("selenium").setLevel(logging.CRITICAL)
options = Options()
options.add_argument(
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
)
driver = webdriver.Chrome(
executable_path=ChromeDriverManager().install(), options=options
)
driver.get(url)
WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.TAG_NAME, "body"))
)
# Get the HTML content directly from the browser's DOM
page_source = driver.execute_script("return document.body.outerHTML;")
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
text = soup.get_text()
lines = (line.strip() for line in text.splitlines())
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
text = "\n".join(chunk for chunk in chunks if chunk)
return driver, text
def scrape_links_with_selenium(driver):
page_source = driver.page_source
soup = BeautifulSoup(page_source, "html.parser")
for script in soup(["script", "style"]):
script.extract()
hyperlinks = extract_hyperlinks(soup)
return format_hyperlinks(hyperlinks)
def close_browser(driver):
driver.quit()
def extract_hyperlinks(soup):
return [(link.text, link["href"]) for link in soup.find_all("a", href=True)]
def format_hyperlinks(hyperlinks):
return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]
def add_header(driver):
driver.execute_script(open(f"{file_dir}/js/overlay.js", "r").read())
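For reference, the removed browse_website() helper was called as below (URL and question are illustrative); it returns a summary string plus the driver used to render the page:

# Illustrative call to the removed helper.
answer, driver = browse_website("https://example.com", "What is this page about?")
print(answer)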

View File

@@ -1,11 +0,0 @@
from colorama import Style, init
# Initialize colorama
init(autoreset=True)
# Use the bold ANSI style
print(
f"""{Style.BRIGHT}Please run:
python -m autogpt
"""
)

View File

@@ -1,7 +1,7 @@
import unittest
import tests.context
from autogpt.json_parser import fix_and_parse_json
from autogpt.json_fixes.parsing import fix_and_parse_json
class TestParseJson(unittest.TestCase):

View File

@@ -2,7 +2,7 @@
import requests
from autogpt.browse import scrape_text
from autogpt.commands.web_requests import scrape_text
"""
Code Analysis