Merge branch 'master' into dev

Committed by Andres Caicedo · 2023-04-03 13:51:36 +02:00
26 changed files with 716 additions and 183 deletions

.env.template (new file, 4 lines)

@@ -0,0 +1,4 @@
OPENAI_API_KEY=your-openai-api-key
ELEVENLABS_API_KEY=your-elevenlabs-api-key
SMART_LLM_MODEL="gpt-4"
FAST_LLM_MODEL="gpt-3.5-turbo"

.gitignore (vendored, 3 changes)

@@ -6,4 +6,5 @@ package-lock.json
 *.pyc
 scripts/auto_gpt_workspace/*
 *.mpeg
-venv/*
+.env
+last_run_ai_settings.yaml

Dockerfile (new file, 8 lines)

@@ -0,0 +1,8 @@
FROM python:3.11
WORKDIR /app
COPY scripts/ /app
RUN pip install -r requirements.txt
CMD ["python", "main.py"]

LICENSE (new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 Toran Bruce Richards
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md

@@ -17,10 +17,12 @@ Your support is greatly appreciated
 <p align="center">
 Development of this free, open-source project is made possible by all the <a href="https://github.com/Torantulino/Auto-GPT/graphs/contributors">contributors</a> and <a href="https://github.com/sponsors/Torantulino">sponsors</a>. If you'd like to sponsor this project and have your avatar or company logo appear below <a href="https://github.com/sponsors/Torantulino">click here</a>. 💖
+<p align="center">
+<a href="https://github.com/nocodeclarity"><img src="https://github.com/nocodeclarity.png" width="50px" alt="nocodeclarity" /></a>&nbsp;&nbsp;<a href="https://github.com/tjarmain"><img src="https://github.com/tjarmain.png" width="50px" alt="tjarmain" /></a>&nbsp;&nbsp;<a href="https://github.com/tekelsey"><img src="https://github.com/tekelsey.png" width="50px" alt="tekelsey" /></a>&nbsp;&nbsp;<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="50px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/digisomni"><img src="https://github.com/digisomni.png" width="50px" alt="digisomni" /></a>&nbsp;&nbsp;
+</p>
 </p>
 <p align="center">
-<a href="https://github.com/nocodeclarity"><img src="https://github.com/nocodeclarity.png" width="50px" alt="nocodeclarity" /></a>&nbsp;&nbsp;<a href="https://github.com/tjarmain"><img src="https://github.com/tjarmain.png" width="50px" alt="robjtede" /></a>&nbsp;&nbsp;
+<a href="https://github.com/alexisneuhaus"><img src="https://github.com/alexisneuhaus.png" width="30px" alt="alexisneuhaus" /></a>&nbsp;&nbsp;<a href="https://github.com/iokode"><img src="https://github.com/iokode.png" width="30px" alt="iokode" /></a>&nbsp;&nbsp;<a href="https://github.com/jaumebalust"><img src="https://github.com/jaumebalust.png" width="30px" alt="jaumebalust" /></a>&nbsp;&nbsp;
 </p>
 </p>
@@ -65,9 +67,9 @@ git clone https://github.com/Torantulino/Auto-GPT.git
 ```
 2. Navigate to the project directory:
-*(Type this into your CMD window, you're aiming to navigate the CMD window to the "scripts" folder you just downloaded)*
+*(Type this into your CMD window, you're aiming to navigate the CMD window to the repository you just downloaded)*
 ```
-$ cd 'Auto-GPT/scripts'
+$ cd 'Auto-GPT'
 ```
 3. Install the required dependencies:
@@ -76,22 +78,24 @@ $ cd 'Auto-GPT/scripts'
 pip install -r requirements.txt
 ```
-4. Edit the file named "keys.py" in the "scripts" directory to add your OpenAI API key (and eleven labs key if you want speech):
-*(Open the keys.py file in a text editor and follow the instructions inside, save it after)*
+4. Rename `.env.template` to `.env` and fill in your `OPENAI_API_KEY`. If you plan to use Speech Mode, fill in your `ELEVEN_LABS_API_KEY` as well.
+- Obtain your OpenAI API key from: https://platform.openai.com/account/api-keys.
+- Obtain your ElevenLabs API key from: https://elevenlabs.io. You can view your xi-api-key using the "Profile" tab on the website.
 
 ## 🔧 Usage
 
-1. Run the Python script in your terminal:
+1. Run the `main.py` Python script in your terminal:
 *(Type this into your CMD window)*
 ```
-python main.py
+python scripts/main.py
 ```
 2. After each of AUTO-GPT's actions, type "NEXT COMMAND" to authorise them to continue.
 3. To exit the program, type "exit" and press Enter.
 
 ## 🗣️ Speech Mode
+Use this to use TTS for Auto-GPT
 ```
-python main.py speach-mode
+python scripts/main.py speak-mode
 ```
 
 ## 💀 Continuous Mode ⚠️
@@ -100,9 +104,9 @@ Continuous mode is not recommended.
 It is potentially dangerous and may cause your AI to run forever or carry out actions you would not usually authorise.
 Use at your own risk.
-1. Run the Python script in your terminal:
+1. Run the `main.py` Python script in your terminal:
 ```
-python main.py continuous-mode
+python scripts/main.py continuous-mode
 ```
 2. To exit the program, press Ctrl + C


@@ -0,0 +1,14 @@
# I wasn't having any luck installing the requirements.txt file in Mac or Linux
# But this seems to work.
# The biggest difference is docker 5 instead of 6, because of this silliness:
#
# The conflict is caused by:
# The user requested requests>=2.26.0
# docker 6.0.1 depends on requests>=2.26.0
# googlesearch-python 1.1.0 depends on requests==2.25.1
docker==5.0.3
# I'd love to fix this in a cleaner way
# Now go ahead and install the rest of what requirements.txt says:
-r requirements.txt

requirements.txt (new file, 13 lines)

@@ -0,0 +1,13 @@
beautifulsoup4
colorama==0.4.6
dirtyjson==1.0.
openai==0.27.2
playsound==1.3.0
python-dotenv==1.0.0
pyyaml==6.0
readability-lxml==0.8.1
requests
tiktoken==0.3.3
docker
# googlesearch-python
# Googlesearch python seems to be a bit cursed, anyone good at fixing thigns like this?

scripts/agent_manager.py

@@ -1,10 +1,10 @@
-import openai
+from llm_utils import create_chat_completion
 
 next_key = 0
 agents = {}  # key, (task, full_message_history, model)
 
 # Create new GPT agent
+# TODO: Centralise use of create_chat_completion() to globally enforce token limit
 
 def create_agent(task, prompt, model):
     """Create a new agent and return its key"""
@@ -14,13 +14,11 @@ def create_agent(task, prompt, model):
messages = [{"role": "user", "content": prompt}, ] messages = [{"role": "user", "content": prompt}, ]
# Start GTP3 instance # Start GTP3 instance
response = openai.ChatCompletion.create( agent_reply = create_chat_completion(
model=model, model=model,
messages=messages, messages=messages,
) )
agent_reply = response.choices[0].message["content"]
# Update full message history # Update full message history
messages.append({"role": "assistant", "content": agent_reply}) messages.append({"role": "assistant", "content": agent_reply})
@@ -44,14 +42,11 @@ def message_agent(key, message):
messages.append({"role": "user", "content": message}) messages.append({"role": "user", "content": message})
# Start GTP3 instance # Start GTP3 instance
response = openai.ChatCompletion.create( agent_reply = create_chat_completion(
model=model, model=model,
messages=messages, messages=messages,
) )
# Get agent response
agent_reply = response.choices[0].message["content"]
# Update full message history # Update full message history
messages.append({"role": "assistant", "content": agent_reply}) messages.append({"role": "assistant", "content": agent_reply})

scripts/ai_config.py (new file, 43 lines)

@@ -0,0 +1,43 @@
import yaml
import data

class AIConfig:
    def __init__(self, ai_name="", ai_role="", ai_goals=[]):
        self.ai_name = ai_name
        self.ai_role = ai_role
        self.ai_goals = ai_goals

    # Soon this will go in a folder where it remembers more stuff about the run(s)
    SAVE_FILE = "last_run_ai_settings.yaml"

    @classmethod
    def load(cls, config_file=SAVE_FILE):
        # Load variables from yaml file if it exists
        try:
            with open(config_file) as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}

        ai_name = config_params.get("ai_name", "")
        ai_role = config_params.get("ai_role", "")
        ai_goals = config_params.get("ai_goals", [])

        return cls(ai_name, ai_role, ai_goals)

    def save(self, config_file=SAVE_FILE):
        config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
        with open(config_file, "w") as file:
            yaml.dump(config, file)

    def construct_full_prompt(self):
        prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

        # Construct full prompt
        full_prompt = f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        for i, goal in enumerate(self.ai_goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{data.load_prompt()}"
        return full_prompt
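
A short usage sketch (not part of the diff) of how the new class round-trips settings; the name, role, and goal values are illustrative:
```
from ai_config import AIConfig

config = AIConfig(
    ai_name="Entrepreneur-GPT",
    ai_role="an AI designed to autonomously develop and run businesses",
    ai_goals=["Increase net worth"],
)
config.save()               # writes last_run_ai_settings.yaml
restored = AIConfig.load()  # picks the settings back up on the next run
print(restored.construct_full_prompt())
```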

scripts/ai_functions.py

@@ -1,31 +1,12 @@
 from typing import List, Optional
 import json
-import openai
+from config import Config
+from call_ai_function import call_ai_function
+from json_parser import fix_and_parse_json
 
-# This is a magic function that can do anything with no-code. See
-# https://github.com/Torantulino/AI-Functions for more info.
-def call_ai_function(function, args, description, model="gpt-4"):
-    """Calls an AI function and returns the result."""
-    args = ", ".join(args)
-    messages = [
-        {
-            "role": "system",
-            "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
-        },
-        {"role": "user", "content": args},
-    ]
-    response = openai.ChatCompletion.create(
-        model=model, messages=messages, temperature=0
-    )
-    return response.choices[0].message["content"]
+cfg = Config()
 
 # Evaluating code
 def evaluate_code(code: str) -> List[str]:
     """Evaluates the given code and returns a list of suggestions for improvements."""
     function_string = "def analyze_code(code: str) -> List[str]:"
@@ -33,12 +14,12 @@ def evaluate_code(code: str) -> List[str]:
description_string = """Analyzes the given code and returns a list of suggestions for improvements.""" description_string = """Analyzes the given code and returns a list of suggestions for improvements."""
result_string = call_ai_function(function_string, args, description_string) result_string = call_ai_function(function_string, args, description_string)
return json.loads(result_string)
return result_string
# Improving code # Improving code
def improve_code(suggestions: List[str], code: str) -> str: def improve_code(suggestions: List[str], code: str) -> str:
"""Improves the provided code based on the suggestions provided, making no other changes.""" """Improves the provided code based on the suggestions provided, making no other changes."""
function_string = ( function_string = (
@@ -64,3 +45,5 @@ def write_tests(code: str, focus: List[str]) -> str:
     result_string = call_ai_function(function_string, args, description_string)
     return result_string

scripts/browse.py

@@ -1,9 +1,9 @@
-from googlesearch import search
 import requests
 from bs4 import BeautifulSoup
-from readability import Document
-import openai
+from config import Config
+from llm_utils import create_chat_completion
+
+cfg = Config()
 
 def scrape_text(url):
     """Scrape text from a webpage"""
@@ -94,7 +94,7 @@ def summarize_text(text, is_website=True):
         messages = [
             {
                 "role": "user",
-                "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
+                "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specific information this subpage contains.: " +
                 chunk},
         ]
     else:
@@ -105,13 +105,11 @@ def summarize_text(text, is_website=True):
                 chunk},
         ]
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
+        summary = create_chat_completion(
+            model=cfg.fast_llm_model,
             messages=messages,
             max_tokens=300,
         )
-        summary = response.choices[0].message.content
         summaries.append(summary)
     print("Summarized " + str(len(chunks)) + " chunks.")
@@ -122,7 +120,7 @@ def summarize_text(text, is_website=True):
         messages = [
             {
                 "role": "user",
-                "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specifc information this subpage contains.: " +
+                "content": "Please summarize the following website text, do not describe the general website, but instead concisely extract the specific information this subpage contains.: " +
                 combined_summary},
         ]
     else:
@@ -133,11 +131,10 @@ def summarize_text(text, is_website=True):
                 combined_summary},
         ]
-    response = openai.ChatCompletion.create(
-        model="gpt-3.5-turbo",
+    final_summary = create_chat_completion(
+        model=cfg.fast_llm_model,
         messages=messages,
         max_tokens=300,
     )
-    final_summary = response.choices[0].message.content
     return final_summary

scripts/call_ai_function.py (new file, 25 lines)

@@ -0,0 +1,25 @@
from config import Config
cfg = Config()

from llm_utils import create_chat_completion

# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=cfg.smart_llm_model):
    # For each arg, if any are None, convert to "None":
    args = [str(arg) if arg is not None else "None" for arg in args]
    # parse args to comma seperated string
    args = ", ".join(args)
    messages = [
        {
            "role": "system",
            "content": f"You are now the following python function: ```# {description}\n{function}```\n\nOnly respond with your `return` value.",
        },
        {"role": "user", "content": args},
    ]

    response = create_chat_completion(
        model=model, messages=messages, temperature=0
    )

    return response
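
A hedged example of calling this helper (the function signature and argument values below are made up for illustration): the model is told to impersonate the given Python function and reply with only its return value:
```
from call_ai_function import call_ai_function

result = call_ai_function(
    function="def add(a: int, b: int) -> int:",
    args=[2, 3],
    description="Adds two integers and returns the sum.",
)
print(result)  # the model's reply as a string, ideally "5"
```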

scripts/chat.py

@@ -1,9 +1,12 @@
 import time
 import openai
-import keys
+from dotenv import load_dotenv
+from config import Config
+import token_counter
 
-# Initialize the OpenAI API client
-openai.api_key = keys.OPENAI_API_KEY
+cfg = Config()
+
+from llm_utils import create_chat_completion
 
 def create_chat_message(role, content):
@@ -20,6 +23,8 @@ def create_chat_message(role, content):
return {"role": role, "content": content} return {"role": role, "content": content}
# TODO: Change debug from hardcode to argument
def chat_with_ai( def chat_with_ai(
prompt, prompt,
user_input, user_input,
@@ -43,16 +48,55 @@ def chat_with_ai(
     Returns:
         str: The AI's response.
     """
+    model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
+    # Reserve 1000 tokens for the response
+    if debug:
+        print(f"Token limit: {token_limit}")
+    send_token_limit = token_limit - 1000
+
     current_context = [
         create_chat_message(
             "system", prompt), create_chat_message(
             "system", f"Permanent memory: {permanent_memory}")]
-    current_context.extend(
-        full_message_history[-(token_limit - len(prompt) - len(permanent_memory) - 10):])
+
+    # Add messages from the full message history until we reach the token limit
+    next_message_to_add_index = len(full_message_history) - 1
+    current_tokens_used = 0
+    insertion_index = len(current_context)
+
+    # Count the currently used tokens
+    current_tokens_used = token_counter.count_message_tokens(current_context, model)
+    current_tokens_used += token_counter.count_message_tokens([create_chat_message("user", user_input)], model)  # Account for user input (appended later)
+
+    while next_message_to_add_index >= 0:
+        # print (f"CURRENT TOKENS USED: {current_tokens_used}")
+        message_to_add = full_message_history[next_message_to_add_index]
+        tokens_to_add = token_counter.count_message_tokens([message_to_add], model)
+        if current_tokens_used + tokens_to_add > send_token_limit:
+            break
+
+        # Add the most recent message to the start of the current context, after the two system prompts.
+        current_context.insert(insertion_index, full_message_history[next_message_to_add_index])
+
+        # Count the currently used tokens
+        current_tokens_used += tokens_to_add
+
+        # Move to the next most recent message in the full message history
+        next_message_to_add_index -= 1
+
+    # Append user input, the length of this is accounted for above
     current_context.extend([create_chat_message("user", user_input)])
+
+    # Calculate remaining tokens
+    tokens_remaining = token_limit - current_tokens_used
+    # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"
 
     # Debug print the current context
     if debug:
+        print(f"Token limit: {token_limit}")
+        print(f"Send Token Count: {current_tokens_used}")
+        print(f"Tokens remaining for response: {tokens_remaining}")
         print("------------ CONTEXT SENT TO AI ---------------")
         for message in current_context:
             # Skip printing the prompt
@@ -60,15 +104,16 @@ def chat_with_ai(
                 continue
             print(
                 f"{message['role'].capitalize()}: {message['content']}")
+            print()
         print("----------- END OF CONTEXT ----------------")
 
-    response = openai.ChatCompletion.create(
-        model="gpt-4",
+    # TODO: use a model defined elsewhere, so that model can contain temperature and other settings we care about
+    assistant_reply = create_chat_completion(
+        model=model,
         messages=current_context,
+        max_tokens=tokens_remaining,
     )
-    assistant_reply = response.choices[0].message["content"]
 
     # Update full message history
     full_message_history.append(
         create_chat_message(
@@ -79,5 +124,6 @@ def chat_with_ai(
         return assistant_reply
     except openai.error.RateLimitError:
+        # TODO: WHen we switch to langchain, this is built in
         print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
         time.sleep(10)

scripts/commands.py

@@ -8,16 +8,28 @@ from config import Config
 import ai_functions as ai
 from file_operations import read_file, write_to_file, append_to_file, delete_file
 from execute_code import execute_python_file
+from json_parser import fix_and_parse_json
+from googlesearch import search
 
 cfg = Config()
 
 def get_command(response):
     """Parse the response and return the command name and arguments"""
     try:
-        response_json = json.loads(response)
+        response_json = fix_and_parse_json(response)
+
+        if "command" not in response_json:
+            return "Error:" , "Missing 'command' object in JSON"
+
         command = response_json["command"]
+
+        if "name" not in command:
+            return "Error:", "Missing 'name' field in 'command' object"
+
         command_name = command["name"]
-        arguments = command["args"]
+
+        # Use an empty dictionary if 'args' field is not present in 'command' object
+        arguments = command.get("args", {})
 
         if not arguments:
             arguments = {}
@@ -35,8 +47,6 @@ def execute_command(command_name, arguments):
     try:
         if command_name == "google":
             return google_search(arguments["input"])
-        elif command_name == "check_notifications":
-            return check_notifications(arguments["website"])
         elif command_name == "memory_add":
             return commit_memory(arguments["string"])
         elif command_name == "memory_del":
@@ -54,12 +64,6 @@ def execute_command(command_name, arguments):
             return list_agents()
         elif command_name == "delete_agent":
             return delete_agent(arguments["key"])
-        elif command_name == "navigate_website":
-            return navigate_website(arguments["action"], arguments["username"])
-        elif command_name == "register_account":
-            return register_account(
-                arguments["username"],
-                arguments["website"])
         elif command_name == "get_text_summary":
             return get_text_summary(arguments["url"])
         elif command_name == "get_hyperlinks":
@@ -103,7 +107,7 @@ def get_datetime():
 def google_search(query, num_results=8):
     """Return the results of a google search"""
     search_results = []
-    for j in browse.search(query, num_results=num_results):
+    for j in search(query, num_results=num_results):
         search_results.append(j)
     return json.dumps(search_results, ensure_ascii=False, indent=4)
@@ -156,8 +160,8 @@ def delete_memory(key):
 def overwrite_memory(key, string):
-    """Overwrite a memory with a given key"""
-    if key >= 0 and key < len(mem.permanent_memory):
+    """Overwrite a memory with a given key and string"""
+    if int(key) >= 0 and key < len(mem.permanent_memory):
         _text = "Overwriting memory with key " + \
             str(key) + " and string " + string
         mem.permanent_memory[key] = string
@@ -174,7 +178,7 @@ def shutdown():
     quit()
 
-def start_agent(name, task, prompt, model="gpt-3.5-turbo"):
+def start_agent(name, task, prompt, model=cfg.fast_llm_model):
     """Start an agent with a given name, task, and prompt"""
     global cfg
@@ -220,23 +224,4 @@ def delete_agent(key):
     result = agents.delete_agent(key)
     if not result:
         return f"Agent {key} does not exist."
     return f"Agent {key} deleted."
-
-def navigate_website(action, username):
-    _text = "Navigating website with action " + action + " and username " + username
-    print(_text)
-    return "Command not implemented yet."
-
-def register_account(username, website):
-    _text = "Registering account with username " + \
-        username + " and website " + website
-    print(_text)
-    return "Command not implemented yet."
-
-def check_notifications(website):
-    _text = "Checking notifications from " + website
-    print(_text)
-    return "Command not implemented yet."

scripts/config.py

@@ -1,3 +1,9 @@
+import os
+import openai
+from dotenv import load_dotenv
+
+# Load environment variables from .env file
+load_dotenv()
+
 class Singleton(type):
     """
     Singleton metaclass for ensuring only one instance of a class.
@@ -23,6 +29,18 @@ class Config(metaclass=Singleton):
"""Initialize the configuration class.""" """Initialize the configuration class."""
self.continuous_mode = False self.continuous_mode = False
self.speak_mode = False self.speak_mode = False
# TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
self.openai_api_key = os.getenv("OPENAI_API_KEY")
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
# Initialize the OpenAI API client
openai.api_key = self.openai_api_key
     def set_continuous_mode(self, value: bool):
         """Set the continuous mode value."""
         self.continuous_mode = value
 
     def set_speak_mode(self, value: bool):
         """Set the speak mode value."""
         self.speak_mode = value
+
+    def set_fast_llm_model(self, value: str):
+        self.fast_llm_model = value
+
+    def set_smart_llm_model(self, value: str):
+        self.smart_llm_model = value
+
+    def set_fast_token_limit(self, value: int):
+        self.fast_token_limit = value
+
+    def set_smart_token_limit(self, value: int):
+        self.smart_token_limit = value
+
+    def set_openai_api_key(self, value: str):
+        self.openai_api_key = value
+
+    def set_elevenlabs_api_key(self, value: str):
+        self.elevenlabs_api_key = value

scripts/data.py

@@ -1,8 +1,16 @@
+import os
+from pathlib import Path
+
 def load_prompt():
     """Load the prompt from data/prompt.txt"""
     try:
+        # get directory of this file:
+        file_dir = Path(os.path.dirname(os.path.realpath(__file__)))
+        data_dir = file_dir / "data"
+        prompt_file = data_dir / "prompt.txt"
+
         # Load the promt from data/prompt.txt
-        with open("data/prompt.txt", "r") as prompt_file:
+        with open(prompt_file, "r") as prompt_file:
             prompt = prompt_file.read()
             return prompt

scripts/data/prompt.txt

@@ -1,6 +1,6 @@
 CONSTRAINTS:
 
-1. 6000-word count limit for memory
+1. ~4000 word limit for memory. Your memory is short, so immidiately save important information to long term memory and code to files.
 2. No user assistance
 
 COMMANDS:
@@ -18,9 +18,9 @@ COMMANDS:
 11. Read file: "read_file", args: "file": "<file>"
 12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
 13. Delete file: "delete_file", args: "file": "<file>"
-14. Evaluate Code: "evaluate_code", args: "code": "<code>"
-15. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<string>"
-16. Write Tests: "write_tests", args: "code": "<string>", "focus": "<list_of_focus_areas>"
+14. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
+15. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
+16. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
 17. Execute Python File: "execute_python_file", args: "file": "<file>"
 18. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
@@ -38,22 +38,24 @@ PERFORMANCE EVALUATION:
 3. Reflect on past decisions and strategies to refine your approach.
 4. Every command has a cost, so be smart and efficent. Aim to complete tasks in the least number of steps.
 
+You should only respond in JSON format as described below
+
 RESPONSE FORMAT:
 {
-    "command":
-    {
-        "name": "command name",
-        "args":
-        {
-            "arg name": "value"
-        }
-    },
-    "thoughts":
-    {
-        "text": "thought",
-        "reasoning": "reasoning",
-        "plan": "short bulleted long-term plan",
-        "criticism": "constructive self-criticism"
-        "speak": "thoughts summary to say to user"
-    }
-}
+    "command": {
+        "name": "command name",
+        "args":{
+            "arg name": "value"
+        }
+    },
+    "thoughts":
+    {
+        "text": "thought",
+        "reasoning": "reasoning",
+        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
+        "criticism": "constructive self-criticism",
+        "speak": "thoughts summary to say to user"
+    }
+}
+
+Ensure the response can be parsed by Python json.loads
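
A quick sanity check (not part of the commit) that a reply in the new format satisfies that closing instruction; the command and thought values are placeholders:
```
import json

reply = """{
    "command": {"name": "google", "args": {"input": "Auto-GPT"}},
    "thoughts": {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\\n- list that conveys\\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user"
    }
}"""
parsed = json.loads(reply)  # the parse the prompt now demands
assert parsed["command"]["name"] == "google"
```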

scripts/execute_code.py

@@ -6,6 +6,8 @@ def execute_python_file(file):
"""Execute a Python file in a Docker container and return the output""" """Execute a Python file in a Docker container and return the output"""
workspace_folder = "auto_gpt_workspace" workspace_folder = "auto_gpt_workspace"
print (f"Executing file '{file}' in workspace '{workspace_folder}'")
if not file.endswith(".py"): if not file.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed." return "Error: Invalid file type. Only .py files are allowed."
@@ -21,7 +23,7 @@ def execute_python_file(file):
         # You can find available Python images on Docker Hub:
         # https://hub.docker.com/_/python
         container = client.containers.run(
-            'python:3.8',
+            'python:3.10',
             f'python {file}',
             volumes={
                 os.path.abspath(workspace_folder): {
@@ -37,6 +39,9 @@ def execute_python_file(file):
         logs = container.logs().decode('utf-8')
         container.remove()
 
+        # print(f"Execution complete. Output: {output}")
+        # print(f"Logs: {logs}")
+
         return logs
 
     except Exception as e:

scripts/json_parser.py (new file, 77 lines)

@@ -0,0 +1,77 @@
import dirtyjson
from call_ai_function import call_ai_function
from config import Config
cfg = Config()

def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
    json_schema = """
    {
        "command": {
            "name": "command name",
            "args":{
                "arg name": "value"
            }
        },
        "thoughts":
        {
            "text": "thought",
            "reasoning": "reasoning",
            "plan": "- short bulleted\n- list that conveys\n- long-term plan",
            "criticism": "constructive self-criticism",
            "speak": "thoughts summary to say to user"
        }
    }
    """
    try:
        return dirtyjson.loads(json_str)
    except Exception as e:
        # Let's do something manually - sometimes GPT responds with something BEFORE the braces:
        # "I'm sorry, I don't understand. Please try again."{"text": "I'm sorry, I don't understand. Please try again.", "confidence": 0.0}
        # So let's try to find the first brace and then parse the rest of the string
        try:
            brace_index = json_str.index("{")
            json_str = json_str[brace_index:]
            last_brace_index = json_str.rindex("}")
            json_str = json_str[:last_brace_index+1]
            return dirtyjson.loads(json_str)
        except Exception as e:
            if try_to_fix_with_gpt:
                print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
                # Now try to fix this up using the ai_functions
                ai_fixed_json = fix_json(json_str, json_schema, False)
                if ai_fixed_json != "failed":
                    return dirtyjson.loads(ai_fixed_json)
                else:
                    print(f"Failed to fix ai output, telling the AI.")  # This allows the AI to react to the error message, which usually results in it correcting its ways.
                    return json_str
            else:
                raise e

# TODO: Make debug a global config var
def fix_json(json_str: str, schema: str, debug=False) -> str:
    # Try to fix the JSON using gpt:
    function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
    args = [json_str, schema]
    description_string = """Fixes the provided JSON string to make it parseable and fully complient with the provided schema.\n If an object or field specifed in the schema isn't contained within the correct JSON, it is ommited.\n This function is brilliant at guessing when the format is incorrect."""

    # If it doesn't already start with a "`", add one:
    if not json_str.startswith("`"):
        json_str = "```json\n" + json_str + "\n```"
    result_string = call_ai_function(
        function_string, args, description_string, model=cfg.fast_llm_model
    )
    if debug:
        print("------------ JSON FIX ATTEMPT ---------------")
        print(f"Original JSON: {json_str}")
        print("-----------")
        print(f"Fixed JSON: {result_string}")
        print("----------- END OF FIX ATTEMPT ----------------")
    try:
        return dirtyjson.loads(result_string)
    except:
        # Get the call stack:
        # import traceback
        # call_stack = traceback.format_exc()
        # print(f"Failed to fix JSON: '{json_str}' "+call_stack)
        return "failed"

scripts/keys.py (deleted)

@@ -1,6 +0,0 @@
# This file contains the API keys for the various APIs used in the project.
# Get yours from: https://beta.openai.com/account/api-keys
OPENAI_API_KEY = "YOUR-OPENAI-KEY"
# To access your ElevenLabs API key, head to https://elevenlabs.io, you
# can view your xi-api-key using the 'Profile' tab on the website.
ELEVENLABS_API_KEY = "YOUR-ELEVENLABS-KEY"

scripts/llm_utils.py (new file, 16 lines)

@@ -0,0 +1,16 @@
import openai

from config import Config
cfg = Config()

openai.api_key = cfg.openai_api_key

# Overly simple abstraction until we create something better
def create_chat_completion(messages, model=None, temperature=None, max_tokens=None) -> str:
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        max_tokens=max_tokens
    )

    return response.choices[0].message["content"]
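
A minimal call through the new abstraction, assuming `OPENAI_API_KEY` is configured; the model and prompt here are illustrative:
```
from llm_utils import create_chat_completion

reply = create_chat_completion(
    messages=[{"role": "user", "content": "Say hello in five words."}],
    model="gpt-3.5-turbo",
    temperature=0,
    max_tokens=20,
)
print(reply)  # a plain string, not the raw API response object
```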

scripts/main.py

@@ -11,14 +11,16 @@ import speak
 from enum import Enum, auto
 import sys
 from config import Config
+from json_parser import fix_and_parse_json
+from ai_config import AIConfig
+import traceback
+import yaml
 
 class Argument(Enum):
     """This class is used to define the different arguments that can be passed"""
     CONTINUOUS_MODE = "continuous-mode"
     SPEAK_MODE = "speak-mode"
 
 def print_to_console(
         title,
         title_color,
@@ -32,6 +34,8 @@ def print_to_console(
         speak.say_text(f"{title}. {content}")
     print(title_color + title + " " + Style.RESET_ALL, end="")
     if content:
+        if isinstance(content, list):
+            content = " ".join(content)
         words = content.split()
         for i, word in enumerate(words):
             print(word, end="", flush=True)
@@ -51,21 +55,24 @@ def print_assistant_thoughts(assistant_reply):
     global cfg
     try:
         # Parse and print Assistant response
-        assistant_reply_json = json.loads(assistant_reply)
+        assistant_reply_json = fix_and_parse_json(assistant_reply)
 
-        assistant_thoughts = assistant_reply_json.get("thoughts")
-        if assistant_thoughts:
-            assistant_thoughts_text = assistant_thoughts.get("text")
-            assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
-            assistant_thoughts_plan = assistant_thoughts.get("plan")
-            assistant_thoughts_criticism = assistant_thoughts.get("criticism")
-            assistant_thoughts_speak = assistant_thoughts.get("speak")
-        else:
-            assistant_thoughts_text = None
-            assistant_thoughts_reasoning = None
-            assistant_thoughts_plan = None
-            assistant_thoughts_criticism = None
-            assistant_thoughts_speak = None
+        try:
+            assistant_thoughts = assistant_reply_json.get("thoughts")
+            if assistant_thoughts:
+                assistant_thoughts_text = assistant_thoughts.get("text")
+                assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
+                assistant_thoughts_plan = assistant_thoughts.get("plan")
+                assistant_thoughts_criticism = assistant_thoughts.get("criticism")
+                assistant_thoughts_speak = assistant_thoughts.get("speak")
+            else:
+                assistant_thoughts_text = None
+                assistant_thoughts_reasoning = None
+                assistant_thoughts_plan = None
+                assistant_thoughts_criticism = None
+                assistant_thoughts_speak = None
+        except Exception as e:
+            assistant_thoughts_text = "The AI's response was unreadable."
 
         print_to_console(
             f"{ai_name.upper()} THOUGHTS:",
@@ -78,8 +85,13 @@ def print_assistant_thoughts(assistant_reply):
         if assistant_thoughts_plan:
             print_to_console("PLAN:", Fore.YELLOW, "")
             if assistant_thoughts_plan:
+                # If it's a list, join it into a string
+                if isinstance(assistant_thoughts_plan, list):
+                    assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
+                elif isinstance(assistant_thoughts_plan, dict):
+                    assistant_thoughts_plan = str(assistant_thoughts_plan)
                 # Split the input_string using the newline character and dash
                 lines = assistant_thoughts_plan.split('\n')
 
                 # Iterate through the lines and print each one with a bullet
@@ -101,12 +113,94 @@ def print_assistant_thoughts(assistant_reply):
print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply) print_to_console("Error: Invalid JSON\n", Fore.RED, assistant_reply)
# All other errors, return "Error: + error message" # All other errors, return "Error: + error message"
except Exception as e: except Exception as e:
print_to_console("Error: \n", Fore.RED, str(e)) call_stack = traceback.format_exc()
print_to_console("Error: \n", Fore.RED, call_stack)
+
+def load_variables(config_file="config.yaml"):
+    # Load variables from yaml file if it exists
+    try:
+        with open(config_file) as file:
+            config = yaml.load(file, Loader=yaml.FullLoader)
+        ai_name = config.get("ai_name")
+        ai_role = config.get("ai_role")
+        ai_goals = config.get("ai_goals")
+    except FileNotFoundError:
+        ai_name = ""
+        ai_role = ""
+        ai_goals = []
+
+    # Prompt the user for input if config file is missing or empty values
+    if not ai_name:
+        ai_name = input("Name your AI: ")
+        if ai_name == "":
+            ai_name = "Entrepreneur-GPT"
+
+    if not ai_role:
+        ai_role = input(f"{ai_name} is: ")
+        if ai_role == "":
+            ai_role = "an AI designed to autonomously develop and run businesses with the sole goal of increasing your net worth."
+
+    if not ai_goals:
+        print("Enter up to 5 goals for your AI: ")
+        print("For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
+        print("Enter nothing to load defaults, enter nothing when finished.")
+        ai_goals = []
+        for i in range(5):
+            ai_goal = input(f"Goal {i+1}: ")
+            if ai_goal == "":
+                break
+            ai_goals.append(ai_goal)
+        if len(ai_goals) == 0:
+            ai_goals = ["Increase net worth", "Grow Twitter Account", "Develop and manage multiple businesses autonomously"]
+
+    # Save variables to yaml file
+    config = {"ai_name": ai_name, "ai_role": ai_role, "ai_goals": ai_goals}
+    with open(config_file, "w") as file:
+        documents = yaml.dump(config, file)
+
+    prompt = data.load_prompt()
+    prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""
+
+    # Construct full prompt
+    full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
+    for i, goal in enumerate(ai_goals):
+        full_prompt += f"{i+1}. {goal}\n"
+    full_prompt += f"\n\n{prompt}"
+    return full_prompt
 def construct_prompt():
-    """Constructs the prompt for the AI"""
+    """Construct the prompt for the AI to respond to"""
+    config = AIConfig.load()
+    if config.ai_name:
+        print_to_console(
+            f"Welcome back! ",
+            Fore.GREEN,
+            f"Would you like me to return to being {config.ai_name}?",
+            speak_text=True)
+        should_continue = input(f"""Continue with the last settings?
+Name: {config.ai_name}
+Role: {config.ai_role}
+Goals: {config.ai_goals}
+Continue (y/n): """)
+        if should_continue.lower() == "n":
+            config = AIConfig()
+
+    if not config.ai_name:
+        config = prompt_user()
+        config.save()
+
+    # Get rid of this global:
     global ai_name
+    ai_name = config.ai_name
+
+    full_prompt = config.construct_full_prompt()
+    return full_prompt
+
+def prompt_user():
+    ai_name = ""
     # Construct the prompt
     print_to_console(
         "Welcome to Auto-GPT! ",
@@ -142,7 +236,7 @@ def construct_prompt():
     print_to_console(
         "Enter up to 5 goals for your AI: ",
         Fore.GREEN,
-        "For example: \nIncrease net worth \nGrow Twitter Account \nDevelop and manage multiple businesses autonomously'")
+        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage multiple businesses autonomously'")
     print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
     ai_goals = []
     for i in range(5):
@@ -154,19 +248,8 @@ def construct_prompt():
ai_goals = ["Increase net worth", "Grow Twitter Account", ai_goals = ["Increase net worth", "Grow Twitter Account",
"Develop and manage multiple businesses autonomously"] "Develop and manage multiple businesses autonomously"]
prompt = data.load_prompt() config = AIConfig(ai_name, ai_role, ai_goals)
prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications.""" return config
# Construct full prompt
full_prompt = f"You are {ai_name}, {ai_role}\n{prompt_start}\n\nGOALS:\n\n"
for i, goal in enumerate(ai_goals):
full_prompt += f"{i+1}. {goal}\n"
full_prompt += f"\n\n{prompt}"
return full_prompt
# Check if the python script was executed with arguments, get those arguments
def parse_arguments(): def parse_arguments():
"""Parses the arguments passed to the script""" """Parses the arguments passed to the script"""
@@ -186,16 +269,20 @@ def parse_arguments():
             cfg.set_speak_mode(True)
 
-cfg = Config()
+# TODO: Better argument parsing:
+# TODO: fill in llm values here
+cfg = Config()
 parse_arguments()
 ai_name = ""
 prompt = construct_prompt()
+# print(prompt)
 # Initialize variables
 full_message_history = []
-token_limit = 6000  # The maximum number of tokens allowed in the API call
 result = None
-user_input = "NEXT COMMAND"
+# Make a constant:
+user_input = "Determine which next command to use, and respond using the format specified above:"
 
 # Interaction Loop
 while True:
@@ -206,8 +293,9 @@ while True:
         user_input,
         full_message_history,
         mem.permanent_memory,
-        token_limit)
+        cfg.fast_token_limit)  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
 
+    # print("assistant reply: "+assistant_reply)
     # Print Assistant thoughts
     print_assistant_thoughts(assistant_reply)
@@ -232,7 +320,7 @@ while True:
     while True:
         console_input = input(Fore.MAGENTA + "Input:" + Style.RESET_ALL)
         if console_input.lower() == "y":
-            user_input = "NEXT COMMAND"
+            user_input = "GENERATE NEXT COMMAND JSON"
             break
         elif console_input.lower() == "n":
             user_input = "EXIT"
@@ -240,7 +328,7 @@ while True:
         else:
             continue
 
-    if user_input != "NEXT COMMAND":
+    if user_input != "GENERATE NEXT COMMAND JSON":
         print("Exiting...", flush=True)
         break
@@ -255,7 +343,7 @@ while True:
         Fore.CYAN,
         f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}  ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
 
-    # Exectute command
+    # Execute command
     if command_name.lower() != "error":
         result = f"Command {command_name} returned: {cmd.execute_command(command_name, arguments)}"
     else:

scripts/requirements.txt (deleted)

@@ -1,8 +0,0 @@
beautifulsoup4==4.9.3
colorama==0.4.6
googlesearch_python==1.1.0
openai==0.27.0
playsound==1.2.2
readability_lxml==0.8.1
requests==2.25.1
docker==6.0.1

scripts/speak.py

@@ -1,16 +1,17 @@
 import os
 from playsound import playsound
 import requests
-import keys
+from config import Config
+cfg = Config()
 
+# TODO: Nicer names for these ids
 voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
 
 tts_headers = {
     "Content-Type": "application/json",
-    "xi-api-key": keys.ELEVENLABS_API_KEY
+    "xi-api-key": cfg.elevenlabs_api_key
 }
 
 def say_text(text, voice_index=0):
     """Say text using ElevenLabs API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(

scripts/token_counter.py (new file, 57 lines)

@@ -0,0 +1,57 @@
import tiktoken
from typing import List, Dict

def count_message_tokens(messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo-0301") -> int:
    """
    Returns the number of tokens used by a list of messages.

    Args:
        messages (list): A list of messages, each of which is a dictionary containing the role and content of the message.
        model (str): The name of the model to use for tokenization. Defaults to "gpt-3.5-turbo-0301".

    Returns:
        int: The number of tokens used by the list of messages.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        print("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model == "gpt-3.5-turbo":
        # !Node: gpt-3.5-turbo may change over time. Returning num tokens assuming gpt-3.5-turbo-0301.")
        return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
    elif model == "gpt-4":
        # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
        return count_message_tokens(messages, model="gpt-4-0314")
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = 4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif model == "gpt-4-0314":
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        raise NotImplementedError(f"""num_tokens_from_messages() is not implemented for model {model}. See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens

def count_string_tokens(string: str, model_name: str) -> int:
    """
    Returns the number of tokens in a text string.

    Args:
        string (str): The text string.
        model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")

    Returns:
        int: The number of tokens in the text string.
    """
    encoding = tiktoken.encoding_for_model(model_name)
    num_tokens = len(encoding.encode(string))
    return num_tokens
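
A short sketch of both counters (exact numbers depend on the tokenizer version):
```
from token_counter import count_message_tokens, count_string_tokens

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(count_message_tokens(messages, model="gpt-3.5-turbo"))   # includes per-message overhead
print(count_string_tokens("Hello world", model_name="gpt-3.5-turbo"))  # raw encoding length
```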

tests/json_tests.py (new file, 115 lines)

@@ -0,0 +1,115 @@
import unittest
import os
import sys

# Probably a better way:
sys.path.append(os.path.abspath('../scripts'))
from json_parser import fix_and_parse_json

class TestParseJson(unittest.TestCase):

    def test_valid_json(self):
        # Test that a valid JSON string is parsed correctly
        json_str = '{"name": "John", "age": 30, "city": "New York"}'
        obj = fix_and_parse_json(json_str)
        self.assertEqual(obj, {"name": "John", "age": 30, "city": "New York"})

    def test_invalid_json_minor(self):
        # Test that an invalid JSON string can be fixed with gpt
        json_str = '{"name": "John", "age": 30, "city": "New York",}'
        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), {"name": "John", "age": 30, "city": "New York"})

    def test_invalid_json_major_with_gpt(self):
        # Test that an invalid JSON string raises an error when try_to_fix_with_gpt is False
        json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=True), {"name": "John", "age": 30, "city": "New York"})

    def test_invalid_json_major_without_gpt(self):
        # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
        json_str = 'BEGIN: "name": "John" - "age": 30 - "city": "New York" :END'
        # Assert that this raises an exception:
        with self.assertRaises(Exception):
            fix_and_parse_json(json_str, try_to_fix_with_gpt=False)

    def test_invalid_json_leading_sentence_with_gpt(self):
        # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
        json_str = """I suggest we start by browsing the repository to find any issues that we can fix.

{
    "command": {
        "name": "browse_website",
        "args":{
            "url": "https://github.com/Torantulino/Auto-GPT"
        }
    },
    "thoughts":
    {
        "text": "I suggest we start browsing the repository to find any issues that we can fix.",
        "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
        "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
        "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
        "speak": "I will start browsing the repository to find any issues we can fix."
    }
}"""
        good_obj = {
            "command": {
                "name": "browse_website",
                "args": {
                    "url": "https://github.com/Torantulino/Auto-GPT"
                }
            },
            "thoughts":
            {
                "text": "I suggest we start browsing the repository to find any issues that we can fix.",
                "reasoning": "Browsing the repository will give us an idea of the current state of the codebase and identify any issues that we can address to improve the repo.",
                "plan": "- Look through the repository to find any issues.\n- Investigate any issues to determine what needs to be fixed\n- Identify possible solutions to fix the issues\n- Open Pull Requests with fixes",
                "criticism": "I should be careful while browsing so as not to accidentally introduce any new bugs or issues.",
                "speak": "I will start browsing the repository to find any issues we can fix."
            }
        }
        # Assert that this raises an exception:
        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)

    def test_invalid_json_leading_sentence_with_gpt(self):
        # Test that a REALLY invalid JSON string raises an error when try_to_fix_with_gpt is False
        json_str = """I will first need to browse the repository (https://github.com/Torantulino/Auto-GPT) and identify any potential bugs that need fixing. I will use the "browse_website" command for this.

{
    "command": {
        "name": "browse_website",
        "args":{
            "url": "https://github.com/Torantulino/Auto-GPT"
        }
    },
    "thoughts":
    {
        "text": "Browsing the repository to identify potential bugs",
        "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
        "plan": "- Analyze the repository for potential bugs and areas of improvement",
        "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
        "speak": "I am browsing the repository to identify potential bugs."
    }
}"""
        good_obj = {
            "command": {
                "name": "browse_website",
                "args": {
                    "url": "https://github.com/Torantulino/Auto-GPT"
                }
            },
            "thoughts":
            {
                "text": "Browsing the repository to identify potential bugs",
                "reasoning": "Before fixing bugs, I need to identify what needs fixing. I will use the 'browse_website' command to analyze the repository.",
                "plan": "- Analyze the repository for potential bugs and areas of improvement",
                "criticism": "I need to ensure I am thorough and pay attention to detail while browsing the repository.",
                "speak": "I am browsing the repository to identify potential bugs."
            }
        }
        # Assert that this raises an exception:
        self.assertEqual(fix_and_parse_json(json_str, try_to_fix_with_gpt=False), good_obj)

if __name__ == '__main__':
    unittest.main()