Fix merge conflicts

Bernhard Mueller committed on 2023-04-10 10:14:35 +07:00
16 changed files with 498 additions and 42 deletions

View File

@@ -9,4 +9,6 @@ CUSTOM_SEARCH_ENGINE_ID=
 USE_AZURE=False
 OPENAI_API_BASE=your-base-url-for-azure
 OPENAI_API_VERSION=api-version-for-azure
 OPENAI_DEPLOYMENT_ID=deployment-id-for-azure
+IMAGE_PROVIDER=dalle
+HUGGINGFACE_API_TOKEN=

View File

@@ -9,7 +9,7 @@ Auto-GPT is an experimental open-source application showcasing the capabilities
 https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4
-## 💖 Help Fund Auto-GPT's Development
+<h2 align="center"> 💖 Help Fund Auto-GPT's Development 💖</h2>
 <p align="center">
 If you can spare a coffee, you can help to cover the API costs of developing Auto-GPT and help push the boundaries of fully autonomous AI!
 A full day of development can easily cost as much as $20 in API costs, which for a free project is quite limiting.
@@ -17,14 +17,13 @@ Your support is greatly appreciated
 </p>
 <p align="center">
-Development of this free, open-source project is made possible by all the <a href="https://github.com/Torantulino/Auto-GPT/graphs/contributors">contributors</a> and <a href="https://github.com/sponsors/Torantulino">sponsors</a>. If you'd like to sponsor this project and have your avatar or company logo appear below <a href="https://github.com/sponsors/Torantulino">click here</a>. 💖
+Development of this free, open-source project is made possible by all the <a href="https://github.com/Torantulino/Auto-GPT/graphs/contributors">contributors</a> and <a href="https://github.com/sponsors/Torantulino">sponsors</a>. If you'd like to sponsor this project and have your avatar or company logo appear below <a href="https://github.com/sponsors/Torantulino">click here</a>.
-<p align="center">
-<a href="https://github.com/thepok"><img src="https://github.com/thepok.png" width="50px" alt="thepok" /></a>&nbsp;&nbsp;<a href="https://github.com/SpacingLily"><img src="https://github.com/SpacingLily.png" width="50px" alt="SpacingLily" /></a>&nbsp;&nbsp;<a href="https://github.com/m"><img src="https://github.com/m.png" width="50px" alt="m" /></a>&nbsp;&nbsp;<a href="https://github.com/zkonduit"><img src="https://github.com/zkonduit.png" width="50px" alt="zkonduit" /></a>&nbsp;&nbsp;<a href="https://github.com/maxxflyer"><img src="https://github.com/maxxflyer.png" width="50px" alt="maxxflyer" /></a>&nbsp;&nbsp;<a href="https://github.com/tekelsey"><img src="https://github.com/tekelsey.png" width="50px" alt="tekelsey" /></a>&nbsp;&nbsp;<a href="https://github.com/nocodeclarity"><img src="https://github.com/nocodeclarity.png" width="50px" alt="nocodeclarity" /></a>&nbsp;&nbsp;<a href="https://github.com/tjarmain"><img src="https://github.com/tjarmain.png" width="50px" alt="tjarmain" /></a>&nbsp;&nbsp;<a href="https://github.com/alexisneuhaus"><img src="https://github.com/alexisneuhaus.png" width="50px" alt="alexisneuhaus" /></a>&nbsp;&nbsp;<a href="https://github.com/jaumebalust"><img src="https://github.com/jaumebalust.png" width="50px" alt="jaumebalust" /></a>&nbsp;&nbsp;<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="50px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/digisomni"><img src="https://github.com/digisomni.png" width="50px" alt="digisomni" /></a>&nbsp;&nbsp;
-</p>
-<h3 align="center">Individual Sponsors</h3>
-<p align="center">
-<a href="https://github.com/alexisneuhaus"><img src="https://github.com/alexisneuhaus.png" width="30px" alt="alexisneuhaus" /></a>&nbsp;&nbsp;<a href="https://github.com/iokode"><img src="https://github.com/iokode.png" width="30px" alt="iokode" /></a>&nbsp;&nbsp;<a href="https://github.com/jaumebalust"><img src="https://github.com/jaumebalust.png" width="30px" alt="jaumebalust" /></a>&nbsp;&nbsp;<a href="https://github.com/nova-land"><img src="https://github.com/nova-land.png" width="30px" alt="nova-land" /></a>&nbsp;&nbsp;<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="30px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/Void-n-Null"><img src="https://github.com/Void-n-Null.png" width="30px" alt="Void-n-Null" /></a>&nbsp;&nbsp;<a href="https://github.com/ritesh24"><img src="https://github.com/ritesh24.png" width="30px" alt="ritesh24" /></a>&nbsp;&nbsp;<a href="https://github.com/merwanehamadi"><img src="https://github.com/merwanehamadi.png" width="30px" alt="merwanehamadi" /></a>&nbsp;&nbsp;<a href="https://github.com/raulmarindev"><img src="https://github.com/raulmarindev.png" width="30px" alt="raulmarindev" /></a>&nbsp;&nbsp;<a href="https://github.com/siduppal"><img src="https://github.com/siduppal.png" width="30px" alt="siduppal" /></a>&nbsp;&nbsp;<a href="https://github.com/goosecubedaddy"><img src="https://github.com/goosecubedaddy.png" width="30px" alt="goosecubedaddy" /></a>&nbsp;&nbsp;<a href="https://github.com/pleabargain"><img src="https://github.com/pleabargain.png" width="30px" alt="pleabargain" /></a>&nbsp;&nbsp;
+<p align="center">
+<a href="https://github.com/robinicus"><img src="https://github.com/robinicus.png" width="50px" alt="robinicus" /></a>&nbsp;&nbsp;<a href="https://github.com/prompthero"><img src="https://github.com/prompthero.png" width="50px" alt="prompthero" /></a>&nbsp;&nbsp;<a href="https://github.com/crizzler"><img src="https://github.com/crizzler.png" width="50px" alt="crizzler" /></a>&nbsp;&nbsp;<a href="https://github.com/tob-le-rone"><img src="https://github.com/tob-le-rone.png" width="50px" alt="tob-le-rone" /></a>&nbsp;&nbsp;<a href="https://github.com/FSTatSBS"><img src="https://github.com/FSTatSBS.png" width="50px" alt="FSTatSBS" /></a>&nbsp;&nbsp;<a href="https://github.com/toverly1"><img src="https://github.com/toverly1.png" width="50px" alt="toverly1" /></a>&nbsp;&nbsp;<a href="https://github.com/ddtarazona"><img src="https://github.com/ddtarazona.png" width="50px" alt="ddtarazona" /></a>&nbsp;&nbsp;<a href="https://github.com/Nalhos"><img src="https://github.com/Nalhos.png" width="50px" alt="Nalhos" /></a>&nbsp;&nbsp;<a href="https://github.com/Kazamario"><img src="https://github.com/Kazamario.png" width="50px" alt="Kazamario" /></a>&nbsp;&nbsp;<a href="https://github.com/pingbotan"><img src="https://github.com/pingbotan.png" width="50px" alt="pingbotan" /></a>&nbsp;&nbsp;<a href="https://github.com/indoor47"><img src="https://github.com/indoor47.png" width="50px" alt="indoor47" /></a>&nbsp;&nbsp;<a href="https://github.com/AuroraHolding"><img src="https://github.com/AuroraHolding.png" width="50px" alt="AuroraHolding" /></a>&nbsp;&nbsp;<a href="https://github.com/kreativai"><img src="https://github.com/kreativai.png" width="50px" alt="kreativai" /></a>&nbsp;&nbsp;<a href="https://github.com/hunteraraujo"><img src="https://github.com/hunteraraujo.png" width="50px" alt="hunteraraujo" /></a>&nbsp;&nbsp;<a href="https://github.com/Explorergt92"><img src="https://github.com/Explorergt92.png" width="50px" alt="Explorergt92" /></a>&nbsp;&nbsp;<a href="https://github.com/judegomila"><img src="https://github.com/judegomila.png" width="50px" alt="judegomila" /></a>&nbsp;&nbsp;
+<a href="https://github.com/thepok"><img src="https://github.com/thepok.png" width="50px" alt="thepok" /></a>
+&nbsp;&nbsp;<a href="https://github.com/SpacingLily"><img src="https://github.com/SpacingLily.png" width="50px" alt="SpacingLily" /></a>&nbsp;&nbsp;<a href="https://github.com/merwanehamadi"><img src="https://github.com/merwanehamadi.png" width="50px" alt="merwanehamadi" /></a>&nbsp;&nbsp;<a href="https://github.com/m"><img src="https://github.com/m.png" width="50px" alt="m" /></a>&nbsp;&nbsp;<a href="https://github.com/zkonduit"><img src="https://github.com/zkonduit.png" width="50px" alt="zkonduit" /></a>&nbsp;&nbsp;<a href="https://github.com/maxxflyer"><img src="https://github.com/maxxflyer.png" width="50px" alt="maxxflyer" /></a>&nbsp;&nbsp;<a href="https://github.com/tekelsey"><img src="https://github.com/tekelsey.png" width="50px" alt="tekelsey" /></a>&nbsp;&nbsp;<a href="https://github.com/digisomni"><img src="https://github.com/digisomni.png" width="50px" alt="digisomni" /></a>&nbsp;&nbsp;<a href="https://github.com/nocodeclarity"><img src="https://github.com/nocodeclarity.png" width="50px" alt="nocodeclarity" /></a>&nbsp;&nbsp;<a href="https://github.com/tjarmain"><img src="https://github.com/tjarmain.png" width="50px" alt="tjarmain" /></a>
 </p>
@@ -43,6 +42,7 @@ Your support is greatly appreciated
 - [Setting up environment variables](#setting-up-environment-variables)
 - [💀 Continuous Mode ⚠️](#-continuous-mode-)
 - [GPT3.5 ONLY Mode](#gpt35-only-mode)
+- [🖼 Image Generation](#image-generation)
 - [⚠️ Limitations](#-limitations)
 - [🛡 Disclaimer](#-disclaimer)
 - [🐦 Connect with Us on Twitter](#-connect-with-us-on-twitter)
@@ -57,7 +57,7 @@ Your support is greatly appreciated
 - 🗃️ File storage and summarization with GPT-3.5

 ## 📋 Requirements

-- [Python 3.7 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
+- [Python 3.8 or later](https://www.tutorialspoint.com/how-to-install-python-in-windows)
 - OpenAI API key
 - PINECONE API key
@@ -141,6 +141,40 @@ export CUSTOM_SEARCH_ENGINE_ID="YOUR_CUSTOM_SEARCH_ENGINE_ID"
 ```

+## Redis Setup
+
+Install Docker Desktop.
+
+Run:
+```
+docker run -d --name redis-stack-server -p 6379:6379 redis/redis-stack-server:latest
+```
+See https://hub.docker.com/r/redis/redis-stack-server for setting a password and additional configuration.
+
+Set the following environment variables:
+```
+MEMORY_BACKEND=redis
+REDIS_HOST=localhost
+REDIS_PORT=6379
+REDIS_PASSWORD=
+```
+
+Note that this setup is not secure and is not intended to face the internet: do not expose Redis to the internet without a password, and preferably not at all.
+
+You can optionally set
+```
+WIPE_REDIS_ON_START=False
+```
+to persist memory stored in Redis.
+
+You can specify the memory index for Redis using the following:
+```
+MEMORY_INDEX=whatever
+```
 ## 🌲 Pinecone API Key Setup

 Pinecone enables a vector-based memory, so a vast memory can be stored and only relevant memories
@@ -170,6 +204,7 @@ Or you can set them in the `.env` file.
 1. View memory usage by using the `--debug` flag :)

 ## 💀 Continuous Mode ⚠️

 Run the AI **without** user authorisation, 100% automated.
 Continuous mode is not recommended.
@@ -188,6 +223,15 @@ If you don't have access to the GPT4 api, this mode will allow you to use Auto-G
 python scripts/main.py --gpt3only
 ```

+## 🖼 Image Generation
+
+By default, Auto-GPT uses DALL-E for image generation. To use Stable Diffusion, a [HuggingFace API Token](https://huggingface.co/settings/tokens) is required.
+
+Once you have a token, set these variables in your `.env`:
+```
+IMAGE_PROVIDER=sd
+HUGGINGFACE_API_TOKEN="YOUR_HUGGINGFACE_API_TOKEN"
+```

 ## ⚠️ Limitations

 This experiment aims to showcase the potential of GPT-4 but comes with some limitations:
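
For reference, a quick way to sanity-check the Redis settings introduced above before pointing Auto-GPT at them. This is a minimal sketch, not part of this commit; it assumes the `redis` Python package and the container from the `docker run` line above.

```
import os

import redis  # pip install redis

# Connect with the same variables the new config.py reads, on db 0
# (the memory backend hard-codes db 0 for its index).
r = redis.Redis(
    host=os.getenv("REDIS_HOST", "localhost"),
    port=int(os.getenv("REDIS_PORT", "6379")),
    password=os.getenv("REDIS_PASSWORD", ""),
    db=0,
)
print(r.ping())         # True if redis-stack-server is reachable
print(r.module_list())  # should list the "search" module needed for vectors
```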

View File

@@ -12,3 +12,6 @@ docker
 duckduckgo-search
 google-api-python-client #(https://developers.google.com/custom-search/v1/overview)
 pinecone-client==2.2.1
+redis
+orjson
+Pillow

View File

@@ -1,6 +1,6 @@
 import yaml
 import data
+import os

 class AIConfig:
     def __init__(self, ai_name="", ai_role="", ai_goals=[]):
@@ -9,7 +9,7 @@ class AIConfig:
         self.ai_goals = ai_goals

     # Soon this will go in a folder where it remembers more stuff about the run(s)
-    SAVE_FILE = "../ai_settings.yaml"
+    SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')

     @classmethod
     def load(cls, config_file=SAVE_FILE):
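
The `SAVE_FILE` change anchors the settings path to the module's location instead of the current working directory; the difference is easy to see in isolation. A hypothetical snippet, not from this commit (the commit uses `os.path.dirname(__file__)` directly; `abspath` is added here so the demo also works when `__file__` is relative):

```
import os

# With the old relative path, the file lands wherever the process was
# started from: os.getcwd() decides the target.
old_style = os.path.abspath("../ai_settings.yaml")

# With the new anchored path, only this file's location matters, so
# running `python scripts/main.py` from the repo root still works.
new_style = os.path.normpath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)),
                 "..", "ai_settings.yaml"))

print(old_style)  # varies with the launch directory
print(new_style)  # always resolves next to the package
```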

View File

@@ -26,8 +26,11 @@ def create_chat_message(role, content):
 def generate_context(prompt, relevant_memory, full_message_history, model):
     current_context = [
         create_chat_message(
-            "system", prompt), create_chat_message(
-            "system", f"Permanent memory: {relevant_memory}")]
+            "system", prompt),
+        create_chat_message(
+            "system", f"The current time and date is {time.strftime('%c')}"),
+        create_chat_message(
+            "system", f"This reminds you of these events from your past:\n{relevant_memory}\n\n")]

     # Add messages from the full message history until we reach the token limit
     next_message_to_add_index = len(full_message_history) - 1
@@ -95,7 +98,7 @@ def chat_with_ai(
             # Count the currently used tokens
             current_tokens_used += tokens_to_add

         # Move to the next most recent message in the full message history
         next_message_to_add_index -= 1
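
The reworked `generate_context()` now seeds every conversation with the current time and a reframed memory message. A stand-alone sketch of the resulting message list, assuming (as the OpenAI chat API implies) that `create_chat_message` returns a role/content dict; the prompt and memory strings are placeholders:

```
import time

def create_chat_message(role, content):
    # assumed shape of the helper in scripts/chat.py
    return {"role": role, "content": content}

prompt = "You are Auto-GPT..."                   # placeholder system prompt
relevant_memory = "1. Wrote summary to out.txt"  # placeholder memory

current_context = [
    create_chat_message("system", prompt),
    create_chat_message(
        "system", f"The current time and date is {time.strftime('%c')}"),
    create_chat_message(
        "system",
        f"This reminds you of these events from your past:\n{relevant_memory}\n\n"),
]
for message in current_context:
    print(message["role"], "|", message["content"].splitlines()[0])
```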

View File

@@ -1,6 +1,6 @@
 import browse
 import json
-from memory import PineconeMemory
+from memory import get_memory
 import datetime
 import agent_manager as agents
 import speak
@@ -9,6 +9,7 @@ import ai_functions as ai
 from file_operations import read_file, write_to_file, append_to_file, delete_file, search_files
 from execute_code import execute_python_file, exec_shell
 from json_parser import fix_and_parse_json
+from image_gen import generate_image
 from duckduckgo_search import ddg
 from googleapiclient.discovery import build
 from googleapiclient.errors import HttpError
@@ -52,10 +53,11 @@ def get_command(response):
 def execute_command(command_name, arguments):
-    memory = PineconeMemory()
+    memory = get_memory(cfg)
     try:
         if command_name == "google":
             # Check if the Google API key is set and use the official search method
             # If the API key is not set or has only whitespaces, use the unofficial search method
             if cfg.google_api_key and (cfg.google_api_key.strip() if cfg.google_api_key else None):
@@ -104,10 +106,12 @@ def execute_command(command_name, arguments):
             return execute_python_file(arguments["file"])
         elif command_name == "exec_shell": # Add this command
             return exec_shell(arguments["command_line"])
+        elif command_name == "generate_image":
+            return generate_image(arguments["prompt"])
         elif command_name == "task_complete":
             shutdown()
         else:
-            return f"Unknown command {command_name}"
+            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for availabe commands and only respond in the specified JSON format."
     # All errors, return "Error: + error message"
     except Exception as e:
         return "Error: " + str(e)

View File

@@ -1,3 +1,4 @@
+import abc
 import os
 import openai
 from dotenv import load_dotenv
@@ -5,7 +6,7 @@ from dotenv import load_dotenv
 load_dotenv()

-class Singleton(type):
+class Singleton(abc.ABCMeta, type):
     """
     Singleton metaclass for ensuring only one instance of a class.
     """
@@ -20,12 +21,17 @@ class Singleton(type):
         return cls._instances[cls]

+class AbstractSingleton(abc.ABC, metaclass=Singleton):
+    pass

 class Config(metaclass=Singleton):
     """
     Configuration class to store the state of bools for different scripts access.
     """
     def __init__(self):
+        self.debug = False
         self.continuous_mode = False
         self.speak_mode = False
         # TODO - make these models be self-contained, using langchain, so we can configure them once and call it good
@@ -53,10 +59,20 @@ class Config(metaclass=Singleton):
         self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
         self.pinecone_region = os.getenv("PINECONE_ENV")

+        self.image_provider = os.getenv("IMAGE_PROVIDER")
+        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")

         # User agent headers to use when browsing web
         # Some websites might just completely deny request with an error code if no user agent was found.
         self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
+        self.redis_host = os.getenv("REDIS_HOST", "localhost")
+        self.redis_port = os.getenv("REDIS_PORT", "6379")
+        self.redis_password = os.getenv("REDIS_PASSWORD", "")
+        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
+        self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
+        # Note that indexes must be created on db 0 in redis, this is not configureable.
+        self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')

         # Initialize the OpenAI API client
         openai.api_key = self.openai_api_key
@@ -95,3 +111,6 @@ class Config(metaclass=Singleton):
     def set_pinecone_region(self, value: str):
         self.pinecone_region = value

+    def set_debug_mode(self, value: bool):
+        self.debug = value
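
The metaclass now inherits from `abc.ABCMeta` so that a single base class can be both abstract and a singleton; `AbstractSingleton` packages that up for the memory providers. A self-contained illustration of why the combination works (not part of the commit; `Provider` and `LocalProvider` are made-up names):

```
import abc

class Singleton(abc.ABCMeta, type):
    """Metaclass that creates each class's instance at most once."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]

class AbstractSingleton(abc.ABC, metaclass=Singleton):
    pass

class Provider(AbstractSingleton):
    @abc.abstractmethod
    def add(self, data): ...

class LocalProvider(Provider):
    def add(self, data):
        return data

assert LocalProvider() is LocalProvider()  # cached: one shared instance
try:
    Provider()  # abstract methods still block direct instantiation
except TypeError as err:
    print(err)
```

Had `Singleton` stayed a plain `type` subclass, declaring `AbstractSingleton(abc.ABC, metaclass=Singleton)` would raise a metaclass conflict, because `abc.ABC`'s own metaclass (`ABCMeta`) would be neither a subclass nor a superclass of `Singleton`.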

View File

@@ -18,12 +18,13 @@ COMMANDS:
 12. Append to file: "append_to_file", args: "file": "<file>", "text": "<text>"
 13. Delete file: "delete_file", args: "file": "<file>"
 14. Search Files: "search_files", args: "directory": "<directory>"
-15. Evaluate Code: "evaluate_code", args: "code": "<full _code_string>"
+15. Evaluate Code: "evaluate_code", args: "code": "<full_code_string>"
 16. Get Improved Code: "improve_code", args: "suggestions": "<list_of_suggestions>", "code": "<full_code_string>"
 17. Write Tests: "write_tests", args: "code": "<full_code_string>", "focus": "<list_of_focus_areas>"
 18. Execute Python File: "execute_python_file", args: "file": "<file>"
 19. Execute Shell Command: "exec_shell", args: "command_line": "<command_line>". Remember only to use commands that terminate, interactive tools like vim are not supported!
 20. Task Complete (Shutdown): "task_complete", args: "reason": "<reason>"
+21. Generate Image: "generate_image", args: "prompt": "<prompt>"

 RESOURCES:
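
With entry 21 in place, the model can request an image like any other command. An assumed example of the `command` object such a reply would carry, following the JSON format the rest of prompt.txt specifies:

```
import json

reply_fragment = {
    "command": {
        "name": "generate_image",
        "args": {"prompt": "a watercolor lighthouse at dusk"},
    }
}
print(json.dumps(reply_fragment, indent=4))
```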

View File: scripts/image_gen.py (new file, 57 lines)

@@ -0,0 +1,57 @@
+import requests
+import io
+import os.path
+from PIL import Image
+from config import Config
+import uuid
+import openai
+from base64 import b64decode
+
+cfg = Config()
+
+working_directory = "auto_gpt_workspace"
+
+def generate_image(prompt):
+    filename = str(uuid.uuid4()) + ".jpg"
+
+    # DALL-E
+    if cfg.image_provider == 'dalle':
+        openai.api_key = cfg.openai_api_key
+        response = openai.Image.create(
+            prompt=prompt,
+            n=1,
+            size="256x256",
+            response_format="b64_json",
+        )
+        print("Image Generated for prompt:" + prompt)
+        image_data = b64decode(response["data"][0]["b64_json"])
+        with open(working_directory + "/" + filename, mode="wb") as png:
+            png.write(image_data)
+        return "Saved to disk:" + filename
+
+    # STABLE DIFFUSION
+    elif cfg.image_provider == 'sd':
+        API_URL = "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
+        headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}
+        response = requests.post(API_URL, headers=headers, json={
+            "inputs": prompt,
+        })
+        image = Image.open(io.BytesIO(response.content))
+        print("Image Generated for prompt:" + prompt)
+        image.save(os.path.join(working_directory, filename))
+        return "Saved to disk:" + filename
+    else:
+        return "No Image Provider Set"

View File

@@ -40,7 +40,7 @@ def fix_and_parse_json(json_str: str, try_to_fix_with_gpt: bool = True):
         if try_to_fix_with_gpt:
             print(f"Warning: Failed to parse AI output, attempting to fix.\n If you see this warning frequently, it's likely that your prompt is confusing the AI. Try changing it up slightly.")
             # Now try to fix this up using the ai_functions
-            ai_fixed_json = fix_json(json_str, json_schema, False)
+            ai_fixed_json = fix_json(json_str, json_schema, cfg.debug)
             if ai_fixed_json != "failed":
                 return json.loads(ai_fixed_json)
             else:

View File

@@ -1,7 +1,7 @@
 import json
 import random
 import commands as cmd
-from memory import PineconeMemory
+from memory import get_memory
 import data
 import chat
 from colorama import Fore, Style
@@ -266,6 +266,10 @@ def parse_arguments():
print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED") print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
cfg.set_smart_llm_model(cfg.fast_llm_model) cfg.set_smart_llm_model(cfg.fast_llm_model)
if args.debug:
print_to_console("Debug Mode: ", Fore.GREEN, "ENABLED")
cfg.set_debug_mode(True)
# TODO: fill in llm values here # TODO: fill in llm values here
@@ -281,12 +285,9 @@ next_action_count = 0
 # Make a constant:
 user_input = "Determine which next command to use, and respond using the format specified above:"

-# raise an exception if pinecone_api_key or region is not provided
-if not cfg.pinecone_api_key or not cfg.pinecone_region: raise Exception("Please provide pinecone_api_key and pinecone_region")

 # Initialize memory and make sure it is empty.
 # this is particularly important for indexing and referencing pinecone memory
-memory = PineconeMemory()
-memory.clear()
+memory = get_memory(cfg, init=True)

 print('Using memory of type: ' + memory.__class__.__name__)

 # Interaction Loop
@@ -298,7 +299,7 @@ while True:
         user_input,
         full_message_history,
         memory,
-        cfg.fast_token_limit) # TODO: This hardcodes the model to use GPT3.5. Make this an argument
+        cfg.fast_token_limit, cfg.debug) # TODO: This hardcodes the model to use GPT3.5. Make this an argument

     # Print Assistant thoughts
     print_assistant_thoughts(assistant_reply)
@@ -358,7 +359,7 @@ while True:
f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}") f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}")
# Execute command # Execute command
if command_name.lower() == "error": if command_name.lower().startswith( "error" ):
result = f"Command {command_name} threw the following error: " + arguments result = f"Command {command_name} threw the following error: " + arguments
elif command_name == "human_feedback": elif command_name == "human_feedback":
result = f"Human feedback: {user_input}" result = f"Human feedback: {user_input}"

View File

@@ -0,0 +1,44 @@
+from memory.local import LocalCache
+try:
+    from memory.redismem import RedisMemory
+except ImportError:
+    print("Redis not installed. Skipping import.")
+    RedisMemory = None
+
+try:
+    from memory.pinecone import PineconeMemory
+except ImportError:
+    print("Pinecone not installed. Skipping import.")
+    PineconeMemory = None
+
+
+def get_memory(cfg, init=False):
+    memory = None
+    if cfg.memory_backend == "pinecone":
+        if not PineconeMemory:
+            print("Error: Pinecone is not installed. Please install pinecone"
+                  " to use Pinecone as a memory backend.")
+        else:
+            memory = PineconeMemory(cfg)
+            if init:
+                memory.clear()
+    elif cfg.memory_backend == "redis":
+        if not RedisMemory:
+            print("Error: Redis is not installed. Please install redis-py to"
+                  " use Redis as a memory backend.")
+        else:
+            memory = RedisMemory(cfg)
+
+    if memory is None:
+        memory = LocalCache(cfg)
+        if init:
+            memory.clear()
+    return memory
+
+
+__all__ = [
+    "get_memory",
+    "LocalCache",
+    "RedisMemory",
+    "PineconeMemory",
+]

View File: scripts/memory/base.py (new file, 31 lines)

@@ -0,0 +1,31 @@
"""Base class for memory providers."""
import abc
from config import AbstractSingleton
import openai
def get_ada_embedding(text):
text = text.replace("\n", " ")
return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
class MemoryProviderSingleton(AbstractSingleton):
@abc.abstractmethod
def add(self, data):
pass
@abc.abstractmethod
def get(self, data):
pass
@abc.abstractmethod
def clear(self):
pass
@abc.abstractmethod
def get_relevant(self, data, num_relevant=5):
pass
@abc.abstractmethod
def get_stats(self):
pass
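
Any new backend only has to satisfy this contract. A hypothetical no-op provider (not in the commit) shows the minimum:

```
from memory.base import MemoryProviderSingleton

class NullMemory(MemoryProviderSingleton):
    """Remembers nothing; handy as a stub while wiring up a real backend."""

    def add(self, data):
        return data

    def get(self, data):
        return None

    def clear(self):
        return "Obliviated"

    def get_relevant(self, data, num_relevant=5):
        return []

    def get_stats(self):
        return 0

assert NullMemory() is NullMemory()  # AbstractSingleton caches one instance
```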

View File: scripts/memory/local.py (new file, 114 lines)

@@ -0,0 +1,114 @@
+import dataclasses
+import orjson
+from typing import Any, List, Optional
+import numpy as np
+import os
+
+from memory.base import MemoryProviderSingleton, get_ada_embedding
+
+
+EMBED_DIM = 1536
+SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
+
+
+def create_default_embeddings():
+    return np.zeros((0, EMBED_DIM)).astype(np.float32)
+
+
+@dataclasses.dataclass
+class CacheContent:
+    texts: List[str] = dataclasses.field(default_factory=list)
+    embeddings: np.ndarray = dataclasses.field(
+        default_factory=create_default_embeddings
+    )
+
+
+class LocalCache(MemoryProviderSingleton):
+
+    # on load, load our database
+    def __init__(self, cfg) -> None:
+        self.filename = f"{cfg.memory_index}.json"
+        if os.path.exists(self.filename):
+            with open(self.filename, 'rb') as f:
+                loaded = orjson.loads(f.read())
+                self.data = CacheContent(**loaded)
+        else:
+            self.data = CacheContent()
+
+    def add(self, text: str):
+        """
+        Add text to our list of texts, add embedding as row to our
+        embeddings-matrix
+
+        Args:
+            text: str
+
+        Returns: None
+        """
+        if 'Command Error:' in text:
+            return ""
+        self.data.texts.append(text)
+
+        embedding = get_ada_embedding(text)
+
+        vector = np.array(embedding).astype(np.float32)
+        vector = vector[np.newaxis, :]
+        self.data.embeddings = np.concatenate(
+            [
+                vector,
+                self.data.embeddings,
+            ],
+            axis=0,
+        )
+
+        with open(self.filename, 'wb') as f:
+            out = orjson.dumps(
+                self.data,
+                option=SAVE_OPTIONS
+            )
+            f.write(out)
+        return text
+
+    def clear(self) -> str:
+        """
+        Clears the local memory cache.
+
+        Returns: A message indicating that the memory has been cleared.
+        """
+        self.data = CacheContent()
+        return "Obliviated"
+
+    def get(self, data: str) -> Optional[List[Any]]:
+        """
+        Gets the data from the memory that is most relevant to the given data.
+
+        Args:
+            data: The data to compare to.
+
+        Returns: The most relevant data.
+        """
+        return self.get_relevant(data, 1)
+
+    def get_relevant(self, text: str, k: int) -> List[Any]:
+        """
+        matrix-vector mult to find score-for-each-row-of-matrix
+        get indices for top-k winning scores
+        return texts for those indices
+
+        Args:
+            text: str
+            k: int
+
+        Returns: List[str]
+        """
+        embedding = get_ada_embedding(text)
+
+        scores = np.dot(self.data.embeddings, embedding)
+
+        top_k_indices = np.argsort(scores)[-k:][::-1]
+
+        return [self.data.texts[i] for i in top_k_indices]
+
+    def get_stats(self):
+        """
+        Returns: The stats of the local cache.
+        """
+        return len(self.data.texts), self.data.embeddings.shape
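
The whole retrieval step in `get_relevant()` is one matrix-vector product followed by an argsort. With tiny stand-in vectors the arithmetic is easy to follow (synthetic numbers, not real ada embeddings; the dot product approximates cosine similarity only because ada embeddings come back roughly unit-length):

```
import numpy as np

embeddings = np.array([      # three stored texts, 4 dims instead of 1536
    [1.0, 0.0, 0.0, 0.0],    # "alpha"
    [0.0, 1.0, 0.0, 0.0],    # "beta"
    [0.7, 0.7, 0.0, 0.0],    # "gamma"
], dtype=np.float32)
texts = ["alpha", "beta", "gamma"]

query = np.array([1.0, 0.1, 0.0, 0.0], dtype=np.float32)

scores = np.dot(embeddings, query)             # -> [1.0, 0.1, 0.77]
top_k_indices = np.argsort(scores)[-2:][::-1]  # best two, highest first
print([texts[i] for i in top_k_indices])       # -> ['alpha', 'gamma']
```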

View File

@@ -1,21 +1,11 @@
-from config import Config, Singleton
 import pinecone
-import openai
+from memory.base import MemoryProviderSingleton, get_ada_embedding

-cfg = Config()
-
-def get_ada_embedding(text):
-    text = text.replace("\n", " ")
-    return openai.Embedding.create(input=[text], model="text-embedding-ada-002")["data"][0]["embedding"]
-
-def get_text_from_embedding(embedding):
-    return openai.Embedding.retrieve(embedding, model="text-embedding-ada-002")["data"][0]["text"]
-
-class PineconeMemory(metaclass=Singleton):
-    def __init__(self):
+class PineconeMemory(MemoryProviderSingleton):
+    def __init__(self, cfg):
         pinecone_api_key = cfg.pinecone_api_key
         pinecone_region = cfg.pinecone_region
         pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)

View File: scripts/memory/redismem.py (new file, 143 lines)

@@ -0,0 +1,143 @@
"""Redis memory provider."""
from typing import Any, List, Optional
import redis
from redis.commands.search.field import VectorField, TextField
from redis.commands.search.query import Query
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
import numpy as np
from memory.base import MemoryProviderSingleton, get_ada_embedding
SCHEMA = [
TextField("data"),
VectorField(
"embedding",
"HNSW",
{
"TYPE": "FLOAT32",
"DIM": 1536,
"DISTANCE_METRIC": "COSINE"
}
),
]
class RedisMemory(MemoryProviderSingleton):
def __init__(self, cfg):
"""
Initializes the Redis memory provider.
Args:
cfg: The config object.
Returns: None
"""
redis_host = cfg.redis_host
redis_port = cfg.redis_port
redis_password = cfg.redis_password
self.dimension = 1536
self.redis = redis.Redis(
host=redis_host,
port=redis_port,
password=redis_password,
db=0 # Cannot be changed
)
self.cfg = cfg
if cfg.wipe_redis_on_start:
self.redis.flushall()
try:
self.redis.ft(f"{cfg.memory_index}").create_index(
fields=SCHEMA,
definition=IndexDefinition(
prefix=[f"{cfg.memory_index}:"],
index_type=IndexType.HASH
)
)
except Exception as e:
print("Error creating Redis search index: ", e)
existing_vec_num = self.redis.get(f'{cfg.memory_index}-vec_num')
self.vec_num = int(existing_vec_num.decode('utf-8')) if\
existing_vec_num else 0
def add(self, data: str) -> str:
"""
Adds a data point to the memory.
Args:
data: The data to add.
Returns: Message indicating that the data has been added.
"""
if 'Command Error:' in data:
return ""
vector = get_ada_embedding(data)
vector = np.array(vector).astype(np.float32).tobytes()
data_dict = {
b"data": data,
"embedding": vector
}
pipe = self.redis.pipeline()
pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
_text = f"Inserting data into memory at index: {self.vec_num}:\n"\
f"data: {data}"
self.vec_num += 1
pipe.set(f'{self.cfg.memory_index}-vec_num', self.vec_num)
pipe.execute()
return _text
def get(self, data: str) -> Optional[List[Any]]:
"""
Gets the data from the memory that is most relevant to the given data.
Args:
data: The data to compare to.
Returns: The most relevant data.
"""
return self.get_relevant(data, 1)
def clear(self) -> str:
"""
Clears the redis server.
Returns: A message indicating that the memory has been cleared.
"""
self.redis.flushall()
return "Obliviated"
def get_relevant(
self,
data: str,
num_relevant: int = 5
) -> Optional[List[Any]]:
"""
Returns all the data in the memory that is relevant to the given data.
Args:
data: The data to compare to.
num_relevant: The number of relevant data to return.
Returns: A list of the most relevant data.
"""
query_embedding = get_ada_embedding(data)
base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
query = Query(base_query).return_fields(
"data",
"vector_score"
).sort_by("vector_score").dialect(2)
query_vector = np.array(query_embedding).astype(np.float32).tobytes()
try:
results = self.redis.ft(f"{self.cfg.memory_index}").search(
query, query_params={"vector": query_vector}
)
except Exception as e:
print("Error calling Redis search: ", e)
return None
return [result.data for result in results.docs]
def get_stats(self):
"""
Returns: The stats of the memory index.
"""
return self.redis.ft(f"{self.cfg.memory_index}").info()
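
For comparison, the same KNN search spelled out directly against redis-py, using the query `get_relevant()` constructs. A sketch assuming a local redis-stack-server and an index named `auto-gpt` populated as above; the random vector stands in for a real query embedding:

```
import numpy as np
import redis
from redis.commands.search.query import Query

r = redis.Redis(host="localhost", port=6379, db=0)

query_vector = np.random.rand(1536).astype(np.float32).tobytes()
query = (
    Query("*=>[KNN 5 @embedding $vector AS vector_score]")
    .return_fields("data", "vector_score")
    .sort_by("vector_score")  # COSINE distance: smaller means closer
    .dialect(2)               # the KNN syntax requires query dialect 2
)
results = r.ft("auto-gpt").search(query, query_params={"vector": query_vector})
print([doc.data for doc in results.docs])
```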