mirror of https://github.com/aljazceru/Auto-GPT.git
isort implemented
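
This commit runs isort over the whole package: imports are regrouped as standard library, then third-party, then first-party `autogpt` modules, separated by blank lines, with the names in each group and in each `from` clause alphabetized. Many hunks also carry Black-style normalization (double quotes, trailing commas, long calls wrapped at 88 columns), so isort was evidently configured to be compatible with Black. A minimal sketch of a pyproject.toml section that would produce this behavior follows; the committed configuration is only partially visible in this diff (the `skip` block near the end), so the exact keys and values here are assumptions:

[tool.isort]
# The "black" profile keeps isort's multi-line wrapping and trailing commas
# compatible with Black, so the two tools don't fight over import blocks.
profile = "black"
line_length = 88          # assumed to match Black's default
skip_glob = [".venv/*", "reports/*", "dist/*"]

With a config like this, `python -m isort .` rewrites the import blocks shown below, and `python -m isort . --check-only` fails without modifying files, which is the usual CI gate.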
@@ -1,12 +1,15 @@
 """Main script for the autogpt package."""
 import logging
+
 from colorama import Fore
+
 from autogpt.agent.agent import Agent
 from autogpt.args import parse_arguments
 from autogpt.config import Config, check_openai_api_key
 from autogpt.logs import logger
 from autogpt.memory import get_memory
 from autogpt.prompt import construct_prompt

 # Load environment variables from .env file
@@ -1,6 +1,6 @@
 from colorama import Fore, Style
-from autogpt.app import execute_command, get_command

+from autogpt.app import execute_command, get_command
 from autogpt.chat import chat_with_ai, create_chat_message
 from autogpt.config import Config
 from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques
@@ -84,7 +84,7 @@ class Agent:
                 # Print Assistant thoughts
                 if assistant_reply_json != {}:
-                    validate_json(assistant_reply_json, 'llm_response_format_1')
+                    validate_json(assistant_reply_json, "llm_response_format_1")
                     # Get command name and arguments
                     try:
                         print_assistant_thoughts(self.ai_name, assistant_reply_json)
@@ -1,8 +1,10 @@
 """Agent manager for managing GPT agents"""
 from __future__ import annotations
+
 from typing import Union
-from autogpt.llm_utils import create_chat_completion
+
 from autogpt.config.config import Singleton
+from autogpt.llm_utils import create_chat_completion


 class AgentManager(metaclass=Singleton):
@@ -1,15 +1,10 @@
 """ Command and Control """
 import json
-from typing import List, NoReturn, Union, Dict
+from typing import Dict, List, NoReturn, Union

 from autogpt.agent.agent_manager import AgentManager
+from autogpt.commands.audio_text import read_audio_from_file
 from autogpt.commands.evaluate_code import evaluate_code
-from autogpt.commands.google_search import google_official_search, google_search
-from autogpt.commands.improve_code import improve_code
-from autogpt.commands.write_tests import write_tests
-from autogpt.config import Config
-from autogpt.commands.image_gen import generate_image
-from autogpt.commands.audio_text import read_audio_from_file
-from autogpt.commands.web_requests import scrape_links, scrape_text
 from autogpt.commands.execute_code import (
     execute_python_file,
     execute_shell,
@@ -18,19 +13,24 @@ from autogpt.commands.execute_code import (
 from autogpt.commands.file_operations import (
     append_to_file,
     delete_file,
+    download_file,
     read_file,
     search_files,
     write_to_file,
-    download_file
 )
+from autogpt.commands.git_operations import clone_repository
+from autogpt.commands.google_search import google_official_search, google_search
+from autogpt.commands.image_gen import generate_image
+from autogpt.commands.improve_code import improve_code
+from autogpt.commands.twitter import send_tweet
+from autogpt.commands.web_requests import scrape_links, scrape_text
+from autogpt.commands.web_selenium import browse_website
+from autogpt.commands.write_tests import write_tests
+from autogpt.config import Config
 from autogpt.json_fixes.parsing import fix_and_parse_json
 from autogpt.memory import get_memory
 from autogpt.processing.text import summarize_text
 from autogpt.speech import say_text
-from autogpt.commands.web_selenium import browse_website
-from autogpt.commands.git_operations import clone_repository
-from autogpt.commands.twitter import send_tweet

 CFG = Config()
 AGENT_MANAGER = AgentManager()
@@ -133,11 +133,14 @@ def execute_command(command_name: str, arguments):
             # google_result can be a list or a string depending on the search results
             if isinstance(google_result, list):
-                safe_message = [google_result_single.encode('utf-8', 'ignore') for google_result_single in google_result]
+                safe_message = [
+                    google_result_single.encode("utf-8", "ignore")
+                    for google_result_single in google_result
+                ]
             else:
-                safe_message = google_result.encode('utf-8', 'ignore')
+                safe_message = google_result.encode("utf-8", "ignore")

-            return safe_message.decode('utf-8')
+            return safe_message.decode("utf-8")
         elif command_name == "memory_add":
             memory = get_memory(CFG)
             return memory.add(arguments["string"])
@@ -1,7 +1,8 @@
 """This module contains the argument parsing logic for the script."""
 import argparse

-from colorama import Fore, Back, Style
+from colorama import Back, Fore, Style
+
 from autogpt import utils
 from autogpt.config import Config
 from autogpt.logs import logger
@@ -64,10 +65,10 @@ def parse_arguments() -> None:
         " skip the re-prompt.",
     )
     parser.add_argument(
-        '--allow-downloads',
-        action='store_true',
-        dest='allow_downloads',
-        help='Dangerous: Allows Auto-GPT to download files natively.'
+        "--allow-downloads",
+        action="store_true",
+        dest="allow_downloads",
+        help="Dangerous: Allows Auto-GPT to download files natively.",
     )
     args = parser.parse_args()
@@ -141,10 +142,17 @@ def parse_arguments() -> None:
     if args.allow_downloads:
         logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
-        logger.typewriter_log("WARNING: ", Fore.YELLOW,
-            f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} " +
-            "It is recommended that you monitor any files it downloads carefully.")
-        logger.typewriter_log("WARNING: ", Fore.YELLOW, f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}")
+        logger.typewriter_log(
+            "WARNING: ",
+            Fore.YELLOW,
+            f"{Back.LIGHTYELLOW_EX}Auto-GPT will now be able to download and save files to your machine.{Back.RESET} "
+            + "It is recommended that you monitor any files it downloads carefully.",
+        )
+        logger.typewriter_log(
+            "WARNING: ",
+            Fore.YELLOW,
+            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
+        )
         CFG.allow_downloads = True

     if args.browser_name:
@@ -1,6 +1,7 @@
-import requests
 import json

+import requests
+
 from autogpt.config import Config
 from autogpt.workspace import path_in_workspace
@@ -5,7 +5,7 @@ import subprocess
 import docker
 from docker.errors import ImageNotFound

-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace


 def execute_python_file(file: str) -> str:
@@ -5,14 +5,14 @@ import os
 import os.path
 from pathlib import Path
 from typing import Generator, List

 import requests
-from requests.adapters import HTTPAdapter
-from requests.adapters import Retry
-from colorama import Fore, Back
+from colorama import Back, Fore
+from requests.adapters import HTTPAdapter, Retry

 from autogpt.spinner import Spinner
 from autogpt.utils import readable_file_size
-from autogpt.workspace import path_in_workspace, WORKSPACE_PATH
+from autogpt.workspace import WORKSPACE_PATH, path_in_workspace

 LOG_FILE = "file_logger.txt"
 LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -47,7 +47,7 @@ def log_operation(operation: str, filename: str) -> None:
         with open(LOG_FILE_PATH, "w", encoding="utf-8") as f:
             f.write("File Operation Logger ")

-    append_to_file(LOG_FILE, log_entry, shouldLog = False)
+    append_to_file(LOG_FILE, log_entry, shouldLog=False)


 def split_file(
@@ -241,23 +241,23 @@ def download_file(url, filename):
         session = requests.Session()
         retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
         adapter = HTTPAdapter(max_retries=retry)
-        session.mount('http://', adapter)
-        session.mount('https://', adapter)
+        session.mount("http://", adapter)
+        session.mount("https://", adapter)

         total_size = 0
         downloaded_size = 0

         with session.get(url, allow_redirects=True, stream=True) as r:
             r.raise_for_status()
-            total_size = int(r.headers.get('Content-Length', 0))
+            total_size = int(r.headers.get("Content-Length", 0))
             downloaded_size = 0

-            with open(safe_filename, 'wb') as f:
+            with open(safe_filename, "wb") as f:
                 for chunk in r.iter_content(chunk_size=8192):
                     f.write(chunk)
                     downloaded_size += len(chunk)

                     # Update the progress message
                     progress = f"{readable_file_size(downloaded_size)} / {readable_file_size(total_size)}"
                     spinner.update_message(f"{message} {progress}")
@@ -1,5 +1,6 @@
 """Git operations for autogpt"""
 import git
+
 from autogpt.config import Config
 from autogpt.workspace import path_in_workspace
@@ -7,6 +7,7 @@ from base64 import b64decode
 import openai
 import requests
 from PIL import Image
+
 from autogpt.config import Config
 from autogpt.workspace import path_in_workspace
@@ -1,5 +1,6 @@
-import tweepy
 import os

+import tweepy
 from dotenv import load_dotenv

 load_dotenv()
@@ -8,6 +8,7 @@ except ImportError:
         "Playwright not installed. Please install it with 'pip install playwright' to use."
     )
 from bs4 import BeautifulSoup
+
 from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
@@ -4,9 +4,9 @@ from __future__ import annotations
 from urllib.parse import urljoin, urlparse

 import requests
-from requests.compat import urljoin
-from requests import Response
 from bs4 import BeautifulSoup
+from requests import Response
+from requests.compat import urljoin

 from autogpt.config import Config
 from autogpt.memory import get_memory
@@ -79,7 +79,7 @@ def check_local_file_access(url: str) -> bool:
         "http://0000",
         "http://0000/",
         "https://0000",
-        "https://0000/"
+        "https://0000/",
     ]
     return any(url.startswith(prefix) for prefix in local_prefixes)
@@ -1,24 +1,26 @@
 """Selenium web scraping module."""
 from __future__ import annotations

-from selenium import webdriver
-from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-import autogpt.processing.text as summary
-from bs4 import BeautifulSoup
-from selenium.webdriver.remote.webdriver import WebDriver
-from selenium.webdriver.common.by import By
-from selenium.webdriver.support.wait import WebDriverWait
-from selenium.webdriver.support import expected_conditions as EC
-from webdriver_manager.chrome import ChromeDriverManager
-from webdriver_manager.firefox import GeckoDriverManager
-from selenium.webdriver.chrome.options import Options as ChromeOptions
-from selenium.webdriver.firefox.options import Options as FirefoxOptions
-from selenium.webdriver.safari.options import Options as SafariOptions
 import logging
 from pathlib import Path
-from autogpt.config import Config
 from sys import platform

+from bs4 import BeautifulSoup
+from selenium import webdriver
+from selenium.webdriver.chrome.options import Options as ChromeOptions
+from selenium.webdriver.common.by import By
+from selenium.webdriver.firefox.options import Options as FirefoxOptions
+from selenium.webdriver.remote.webdriver import WebDriver
+from selenium.webdriver.safari.options import Options as SafariOptions
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.wait import WebDriverWait
+from webdriver_manager.chrome import ChromeDriverManager
+from webdriver_manager.firefox import GeckoDriverManager
+
+import autogpt.processing.text as summary
+from autogpt.config import Config
+from autogpt.processing.html import extract_hyperlinks, format_hyperlinks

 FILE_DIR = Path(__file__).parent.parent
 CFG = Config()
@@ -2,6 +2,7 @@
 from __future__ import annotations

 import json
+
 from autogpt.llm_utils import call_ai_function
@@ -2,7 +2,7 @@
 This module contains the configuration classes for AutoGPT.
 """
 from autogpt.config.ai_config import AIConfig
-from autogpt.config.config import check_openai_api_key, Config
+from autogpt.config.config import Config, check_openai_api_key
 from autogpt.config.singleton import AbstractSingleton, Singleton

 __all__ = [
@@ -6,6 +6,7 @@ from __future__ import annotations

 import os
 from typing import Type
+
 import yaml
@@ -1,14 +1,13 @@
 """Configuration class to store the state of bools for different scripts access."""
 import os
-from colorama import Fore
-
-from autogpt.config.singleton import Singleton

 import openai
 import yaml
+from colorama import Fore
 from dotenv import load_dotenv
+
+from autogpt.config.singleton import Singleton

 load_dotenv(verbose=True)
@@ -74,7 +73,9 @@ class Config(metaclass=Singleton):
         self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
         self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
         self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
-        self.use_weaviate_embedded = os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
+        self.use_weaviate_embedded = (
+            os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
+        )

         # milvus configuration, e.g., localhost:19530.
         self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
@@ -1,9 +1,9 @@
 """This module contains the function to fix JSON strings using GPT-3."""
 import json

+from autogpt.config import Config
 from autogpt.llm_utils import call_ai_function
 from autogpt.logs import logger
-from autogpt.config import Config

 CFG = Config()
@@ -4,6 +4,7 @@ from __future__ import annotations
 import contextlib
 import json
 from typing import Optional
+
 from autogpt.config import Config

 CFG = Config()
@@ -3,13 +3,15 @@ from typing import Any, Dict
 from autogpt.config import Config
 from autogpt.logs import logger
 from autogpt.speech import say_text

 CFG = Config()


 def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
-    from autogpt.json_fixes.parsing import attempt_to_fix_json_by_finding_outermost_brackets
-
-    from autogpt.json_fixes.parsing import fix_and_parse_json
+    from autogpt.json_fixes.parsing import (
+        attempt_to_fix_json_by_finding_outermost_brackets,
+        fix_and_parse_json,
+    )

     # Parse and print Assistant response
     assistant_reply_json = fix_and_parse_json(assistant_reply)
@@ -21,7 +23,10 @@ def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
     if assistant_reply_json != {}:
         return assistant_reply_json

-    logger.error("Error: The following AI output couldn't be converted to a JSON:\n", assistant_reply)
+    logger.error(
+        "Error: The following AI output couldn't be converted to a JSON:\n",
+        assistant_reply,
+    )
     if CFG.speak_mode:
         say_text("I have received an invalid JSON response from the OpenAI API.")
@@ -4,8 +4,10 @@ from __future__ import annotations
 import contextlib
 import json
 from typing import Any, Dict, Union
+
 from colorama import Fore
 from regex import regex
+
 from autogpt.config import Config
 from autogpt.json_fixes.auto_fix import fix_json
 from autogpt.json_fixes.bracket_termination import balance_braces
@@ -1,5 +1,7 @@
 import json
+
 from jsonschema import Draft7Validator
+
 from autogpt.config import Config
 from autogpt.logs import logger
@@ -19,7 +21,9 @@ def validate_json(json_object: object, schema_name: object) -> object:
     if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
         logger.error("The JSON object is invalid.")
         if CFG.debug_mode:
-            logger.error(json.dumps(json_object, indent=4))  # Replace 'json_object' with the variable containing the JSON data
+            logger.error(
+                json.dumps(json_object, indent=4)
+            )  # Replace 'json_object' with the variable containing the JSON data
             logger.error("The following issues were found:")

             for error in errors:
@@ -1,11 +1,11 @@
 from __future__ import annotations

-from ast import List
 import time
+from ast import List

 import openai
-from openai.error import APIError, RateLimitError
 from colorama import Fore, Style
+from openai.error import APIError, RateLimitError

 from autogpt.config import Config
 from autogpt.logs import logger
@@ -105,8 +105,9 @@ def create_chat_completion(
             )
             if not warned_user:
                 logger.double_check(
-                    f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. " +
-                    f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}")
+                    f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
+                    + f"You can read more here: {Fore.CYAN}https://github.com/Significant-Gravitas/Auto-GPT#openai-api-keys-configuration{Fore.RESET}"
+                )
                 warned_user = True
         except APIError as e:
             if e.http_status == 502:
@@ -125,8 +126,8 @@ def create_chat_completion(
         logger.typewriter_log(
             "FAILED TO GET RESPONSE FROM OPENAI",
             Fore.RED,
-            "Auto-GPT has failed to get a response from OpenAI's services. " +
-            f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`."
+            "Auto-GPT has failed to get a response from OpenAI's services. "
+            + f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
         )
         logger.double_check()
     if CFG.debug_mode:
@@ -5,13 +5,13 @@ import os
 import random
 import re
 import time
-from logging import LogRecord
 import traceback
+from logging import LogRecord

 from colorama import Fore, Style

-from autogpt.speech import say_text
 from autogpt.config import Config, Singleton
+from autogpt.speech import say_text

 CFG = Config()
@@ -47,7 +47,7 @@ class Logger(metaclass=Singleton):
         # Info handler in activity.log
         self.file_handler = logging.FileHandler(
-            os.path.join(log_dir, log_file), 'a', 'utf-8'
+            os.path.join(log_dir, log_file), "a", "utf-8"
         )
         self.file_handler.setLevel(logging.DEBUG)
         info_formatter = AutoGptFormatter(
@@ -57,7 +57,7 @@ class Logger(metaclass=Singleton):
         # Error handler error.log
         error_handler = logging.FileHandler(
-            os.path.join(log_dir, error_file), 'a', 'utf-8'
+            os.path.join(log_dir, error_file), "a", "utf-8"
         )
         error_handler.setLevel(logging.ERROR)
         error_formatter = AutoGptFormatter(
@@ -79,7 +79,7 @@ class Logger(metaclass=Singleton):
         self.logger.setLevel(logging.DEBUG)

     def typewriter_log(
-            self, title="", title_color="", content="", speak_text=False, level=logging.INFO
+        self, title="", title_color="", content="", speak_text=False, level=logging.INFO
     ):
         if speak_text and CFG.speak_mode:
             say_text(f"{title}. {content}")
@@ -95,18 +95,18 @@ class Logger(metaclass=Singleton):
         )

     def debug(
-            self,
-            message,
-            title="",
-            title_color="",
+        self,
+        message,
+        title="",
+        title_color="",
     ):
         self._log(title, title_color, message, logging.DEBUG)

     def warn(
-            self,
-            message,
-            title="",
-            title_color="",
+        self,
+        message,
+        title="",
+        title_color="",
     ):
         self._log(title, title_color, message, logging.WARN)
@@ -180,10 +180,10 @@ class AutoGptFormatter(logging.Formatter):
     def format(self, record: LogRecord) -> str:
         if hasattr(record, "color"):
             record.title_color = (
-                    getattr(record, "color")
-                    + getattr(record, "title")
-                    + " "
-                    + Style.RESET_ALL
+                getattr(record, "color")
+                + getattr(record, "title")
+                + " "
+                + Style.RESET_ALL
             )
         else:
             record.title_color = getattr(record, "title")
@@ -294,7 +294,9 @@ def print_assistant_thoughts(ai_name, assistant_reply):
             logger.error("Error: \n", call_stack)


-def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object) -> None:
+def print_assistant_thoughts(
+    ai_name: object, assistant_reply_json_valid: object
+) -> None:
     assistant_thoughts_reasoning = None
     assistant_thoughts_plan = None
     assistant_thoughts_speak = None
@@ -310,9 +312,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
     logger.typewriter_log(
         f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
     )
-    logger.typewriter_log(
-        "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
-    )
+    logger.typewriter_log("REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}")
     if assistant_thoughts_plan:
         logger.typewriter_log("PLAN:", Fore.YELLOW, "")
         # If it's a list, join it into a string
@@ -326,9 +326,7 @@ def print_assistant_thoughts(ai_name: object, assistant_reply_json_valid: object
         for line in lines:
             line = line.lstrip("- ")
             logger.typewriter_log("- ", Fore.GREEN, line.strip())
-    logger.typewriter_log(
-        "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
-    )
+    logger.typewriter_log("CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}")
     # Speak the assistant's thoughts
     if CFG.speak_mode and assistant_thoughts_speak:
         say_text(assistant_thoughts_speak)
@@ -1,11 +1,5 @@
 """ Milvus memory storage provider."""
-from pymilvus import (
-    connections,
-    FieldSchema,
-    CollectionSchema,
-    DataType,
-    Collection,
-)
+from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections

 from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
@@ -1,9 +1,9 @@
 import pinecone
 from colorama import Fore, Style

+from autogpt.llm_utils import create_embedding_with_ada
 from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada


 class PineconeMemory(MemoryProviderSingleton):
@@ -10,9 +10,9 @@ from redis.commands.search.field import TextField, VectorField
 from redis.commands.search.indexDefinition import IndexDefinition, IndexType
 from redis.commands.search.query import Query

+from autogpt.llm_utils import create_embedding_with_ada
 from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton
-from autogpt.llm_utils import create_embedding_with_ada

 SCHEMA = [
     TextField("data"),
@@ -1,11 +1,13 @@
-from autogpt.config import Config
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
 import uuid

 import weaviate
 from weaviate import Client
 from weaviate.embedded import EmbeddedOptions
 from weaviate.util import generate_uuid5

+from autogpt.config import Config
+from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding


 def default_schema(weaviate_index):
     return {
@@ -14,7 +16,7 @@ def default_schema(weaviate_index):
         {
             "name": "raw_text",
             "dataType": ["text"],
-            "description": "original text for the embedding"
+            "description": "original text for the embedding",
         }
     ],
 }
@@ -24,16 +26,20 @@ class WeaviateMemory(MemoryProviderSingleton):
     def __init__(self, cfg):
         auth_credentials = self._build_auth_credentials(cfg)

-        url = f'{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}'
+        url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}"

         if cfg.use_weaviate_embedded:
-            self.client = Client(embedded_options=EmbeddedOptions(
-                hostname=cfg.weaviate_host,
-                port=int(cfg.weaviate_port),
-                persistence_data_path=cfg.weaviate_embedded_path
-            ))
+            self.client = Client(
+                embedded_options=EmbeddedOptions(
+                    hostname=cfg.weaviate_host,
+                    port=int(cfg.weaviate_port),
+                    persistence_data_path=cfg.weaviate_embedded_path,
+                )
+            )

-            print(f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}")
+            print(
+                f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
+            )
         else:
             self.client = Client(url, auth_client_secret=auth_credentials)
@@ -56,7 +62,9 @@ class WeaviateMemory(MemoryProviderSingleton):
     def _build_auth_credentials(self, cfg):
         if cfg.weaviate_username and cfg.weaviate_password:
-            return weaviate.AuthClientPassword(cfg.weaviate_username, cfg.weaviate_password)
+            return weaviate.AuthClientPassword(
+                cfg.weaviate_username, cfg.weaviate_password
+            )
         if cfg.weaviate_api_key:
             return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
         else:
@@ -66,16 +74,14 @@ class WeaviateMemory(MemoryProviderSingleton):
         vector = get_ada_embedding(data)

         doc_uuid = generate_uuid5(data, self.index)
-        data_object = {
-            'raw_text': data
-        }
+        data_object = {"raw_text": data}

         with self.client.batch as batch:
             batch.add_data_object(
                 uuid=doc_uuid,
                 data_object=data_object,
                 class_name=self.index,
-                vector=vector
+                vector=vector,
             )

         return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"
@@ -91,29 +97,31 @@ class WeaviateMemory(MemoryProviderSingleton):
         # after a call to delete_all
         self._create_schema()

-        return 'Obliterated'
+        return "Obliterated"

     def get_relevant(self, data, num_relevant=5):
         query_embedding = get_ada_embedding(data)
         try:
-            results = self.client.query.get(self.index, ['raw_text']) \
-                .with_near_vector({'vector': query_embedding, 'certainty': 0.7}) \
-                .with_limit(num_relevant) \
-                .do()
+            results = (
+                self.client.query.get(self.index, ["raw_text"])
+                .with_near_vector({"vector": query_embedding, "certainty": 0.7})
+                .with_limit(num_relevant)
+                .do()
+            )

-            if len(results['data']['Get'][self.index]) > 0:
-                return [str(item['raw_text']) for item in results['data']['Get'][self.index]]
+            if len(results["data"]["Get"][self.index]) > 0:
+                return [
+                    str(item["raw_text"]) for item in results["data"]["Get"][self.index]
+                ]
             else:
                 return []

         except Exception as err:
-            print(f'Unexpected error {err=}, {type(err)=}')
+            print(f"Unexpected error {err=}, {type(err)=}")
             return []

     def get_stats(self):
-        result = self.client.query.aggregate(self.index) \
-            .with_meta_count() \
-            .do()
-        class_data = result['data']['Aggregate'][self.index]
+        result = self.client.query.aggregate(self.index).with_meta_count().do()
+        class_data = result["data"]["Aggregate"][self.index]

-        return class_data[0]['meta'] if class_data else {}
+        return class_data[0]["meta"] if class_data else {}
@@ -1,8 +1,8 @@
 """HTML processing functions"""
 from __future__ import annotations

-from requests.compat import urljoin
 from bs4 import BeautifulSoup
+from requests.compat import urljoin


 def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
@@ -1,9 +1,11 @@
 """Text processing functions"""
-from typing import Generator, Optional, Dict
+from typing import Dict, Generator, Optional

 from selenium.webdriver.remote.webdriver import WebDriver
-from autogpt.memory import get_memory

 from autogpt.config import Config
 from autogpt.llm_utils import create_chat_completion
+from autogpt.memory import get_memory

 CFG = Config()
 MEMORY = get_memory(CFG)
@@ -1,9 +1,10 @@
 from colorama import Fore

+from autogpt.config import Config
 from autogpt.config.ai_config import AIConfig
 from autogpt.config.config import Config
 from autogpt.logs import logger
 from autogpt.promptgenerator import PromptGenerator
-from autogpt.config import Config
 from autogpt.setup import prompt_user
 from autogpt.utils import clean_input
@@ -92,11 +93,7 @@ def get_prompt() -> str:
     # Only add the audio to text command if the model is specified
     if cfg.huggingface_audio_to_text_model:
         commands.append(
-            (
-                "Convert Audio to text",
-                "read_audio_from_file",
-                {"file": "<file>"}
-            ),
+            ("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
         )

     # Only add shell command to the prompt if the AI is allowed to execute it
@@ -112,7 +109,7 @@ def get_prompt() -> str:
         (
             "Execute Shell Command Popen, non-interactive commands only",
             "execute_shell_popen",
-            {"command_line": "<command_line>"}
+            {"command_line": "<command_line>"},
         ),
     )
@@ -122,7 +119,7 @@ def get_prompt() -> str:
         (
             "Downloads a file from the internet, and stores it locally",
             "download_file",
-            {"url": "<file_url>", "file": "<saved_filename>"}
+            {"url": "<file_url>", "file": "<saved_filename>"},
         ),
     )
@@ -1,5 +1,6 @@
 """Set up the AI and its goals"""
 from colorama import Fore, Style
+
 from autogpt import utils
 from autogpt.config.ai_config import AIConfig
 from autogpt.logs import logger
@@ -1,5 +1,6 @@
 """ Brian speech module for autogpt """
 import os
+
 import requests
 from playsound import playsound
@@ -1,8 +1,8 @@
 """ElevenLabs speech module"""
 import os
-from playsound import playsound

 import requests
+from playsound import playsound

 from autogpt.config import Config
 from autogpt.speech.base import VoiceBase
@@ -1,7 +1,8 @@
 """ GTTS Voice. """
 import os
-from playsound import playsound

 import gtts
+from playsound import playsound

 from autogpt.speech.base import VoiceBase
@@ -1,13 +1,12 @@
 """ Text to speech module """
-from autogpt.config import Config
-
 import threading
 from threading import Semaphore
-from autogpt.speech.brian import BrianSpeech
-from autogpt.speech.macos_tts import MacOSTTS
-from autogpt.speech.gtts import GTTSVoice
-from autogpt.speech.eleven_labs import ElevenLabsSpeech

+from autogpt.config import Config
+from autogpt.speech.brian import BrianSpeech
+from autogpt.speech.eleven_labs import ElevenLabsSpeech
+from autogpt.speech.gtts import GTTSVoice
+from autogpt.speech.macos_tts import MacOSTTS

 CFG = Config()
 DEFAULT_VOICE_ENGINE = GTTSVoice()
@@ -1,8 +1,8 @@
 import argparse
 import logging

-from autogpt.config import Config
 from autogpt.commands.file_operations import ingest_file, search_files
+from autogpt.config import Config
 from autogpt.memory import get_memory

 cfg = Config()
@@ -36,4 +36,5 @@ skip = '''
 .venv/*
 reports/*
 dist/*
+
 '''
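
For context, a sketch of how this skip block could sit in pyproject.toml; only the tail of the block is visible in the hunk above, so the table header and any keys before `skip` are assumptions:

[tool.isort]
profile = "black"  # assumed; see the note at the top of this page
# newline-separated glob patterns that the formatter leaves untouched,
# reproduced verbatim from the hunk above
skip = '''
.venv/*
reports/*
dist/*
'''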
@@ -1,6 +1,7 @@
-import pkg_resources
 import sys

+import pkg_resources
+

 def main():
     requirements_file = sys.argv[1]
@@ -1,6 +1,6 @@
-import unittest
 import os
 import sys
+import unittest

 from bs4 import BeautifulSoup
@@ -1,15 +1,15 @@
+import os
+import sys
 import unittest
 from unittest import mock
-import sys
-import os
+from uuid import uuid4

 from weaviate import Client
 from weaviate.util import get_valid_uuid
-from uuid import uuid4

 from autogpt.config import Config
-from autogpt.memory.weaviate import WeaviateMemory
 from autogpt.memory.base import get_ada_embedding
+from autogpt.memory.weaviate import WeaviateMemory


 class TestWeaviateMemory(unittest.TestCase):
@@ -25,13 +25,17 @@ class TestWeaviateMemory(unittest.TestCase):
         if cls.cfg.use_weaviate_embedded:
             from weaviate.embedded import EmbeddedOptions

-            cls.client = Client(embedded_options=EmbeddedOptions(
-                hostname=cls.cfg.weaviate_host,
-                port=int(cls.cfg.weaviate_port),
-                persistence_data_path=cls.cfg.weaviate_embedded_path
-            ))
+            cls.client = Client(
+                embedded_options=EmbeddedOptions(
+                    hostname=cls.cfg.weaviate_host,
+                    port=int(cls.cfg.weaviate_port),
+                    persistence_data_path=cls.cfg.weaviate_embedded_path,
+                )
+            )
         else:
-            cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}")
+            cls.client = Client(
+                f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}"
+            )

         cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)
@@ -44,6 +48,7 @@ class TestWeaviateMemory(unittest.TestCase):
         USE_WEAVIATE_EMBEDDED=True
         WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
     """
+
     def setUp(self):
         try:
             self.client.schema.delete_class(self.index)
@@ -53,23 +58,23 @@ class TestWeaviateMemory(unittest.TestCase):
         self.memory = WeaviateMemory(self.cfg)

     def test_add(self):
-        doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones'
+        doc = "You are a Titan name Thanos and you are looking for the Infinity Stones"
         self.memory.add(doc)
-        result = self.client.query.get(self.index, ['raw_text']).do()
-        actual = result['data']['Get'][self.index]
+        result = self.client.query.get(self.index, ["raw_text"]).do()
+        actual = result["data"]["Get"][self.index]

         self.assertEqual(len(actual), 1)
-        self.assertEqual(actual[0]['raw_text'], doc)
+        self.assertEqual(actual[0]["raw_text"], doc)

     def test_get(self):
-        doc = 'You are an Avenger and swore to defend the Galaxy from a menace called Thanos'
+        doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos"

         with self.client.batch as batch:
             batch.add_data_object(
                 uuid=get_valid_uuid(uuid4()),
-                data_object={'raw_text': doc},
+                data_object={"raw_text": doc},
                 class_name=self.index,
-                vector=get_ada_embedding(doc)
+                vector=get_ada_embedding(doc),
             )

         batch.flush()
@@ -81,8 +86,8 @@ class TestWeaviateMemory(unittest.TestCase):

     def test_get_stats(self):
         docs = [
-            'You are now about to count the number of docs in this index',
-            'And then you about to find out if you can count correctly'
+            "You are now about to count the number of docs in this index",
+            "And then you about to find out if you can count correctly",
         ]

         [self.memory.add(doc) for doc in docs]
@@ -90,23 +95,23 @@ class TestWeaviateMemory(unittest.TestCase):
         stats = self.memory.get_stats()

         self.assertTrue(stats)
-        self.assertTrue('count' in stats)
-        self.assertEqual(stats['count'], 2)
+        self.assertTrue("count" in stats)
+        self.assertEqual(stats["count"], 2)

     def test_clear(self):
         docs = [
-            'Shame this is the last test for this class',
-            'Testing is fun when someone else is doing it'
+            "Shame this is the last test for this class",
+            "Testing is fun when someone else is doing it",
         ]

         [self.memory.add(doc) for doc in docs]

-        self.assertEqual(self.memory.get_stats()['count'], 2)
+        self.assertEqual(self.memory.get_stats()["count"], 2)

         self.memory.clear()

-        self.assertEqual(self.memory.get_stats()['count'], 0)
+        self.assertEqual(self.memory.get_stats()["count"], 0)


-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
@@ -1,4 +1,5 @@
 import unittest
+
 import tests.context
 from autogpt.token_counter import count_message_tokens, count_string_tokens
@@ -1,6 +1,6 @@
 # Generated by CodiumAI
-import unittest
 import time
+import unittest
 from unittest.mock import patch

 from autogpt.chat import create_chat_message, generate_context