lint: Fix all E302 linting errors

Drikus Roor authored and committed, 2023-04-12 23:05:14 +02:00
parent 04dc0f7149
commit d1ea6cf002
20 changed files with 47 additions and 0 deletions
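
For context: pycodestyle's E302 ("expected 2 blank lines, found N") fires when a top-level def or class is preceded by fewer than two blank lines. A minimal before/after sketch of the pattern this commit fixes (a hypothetical module, not code from the diffs below):

    # Before: flake8 flags E302 at both defs
    # (one blank line before the first, none before the second)
    import os

    def first():
        return os.getcwd()
    def second():
        return "second"

    # After: exactly two blank lines before each top-level def satisfies E302
    import os


    def first():
        return os.getcwd()


    def second():
        return "second"

Fixes like these can also be reproduced mechanically, e.g. flake8 --select=E302 to find the sites and autopep8 --select=E302 --in-place to rewrite them (assumed tooling; the commit does not say how the fixes were made).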

View File

@@ -6,6 +6,7 @@ agents = {} # key, (task, full_message_history, model)
 # Create new GPT agent
 # TODO: Centralise use of create_chat_completion() to globally enforce token limit
+
 def create_agent(task, prompt, model):
     """Create a new agent and return its key"""
     global next_key

View File

@@ -2,6 +2,7 @@ import yaml
 import data
 import os
 
+
 class AIConfig:
     """
     A class object that contains the configuration information for the AI

View File

@@ -21,12 +21,28 @@ def sanitize_url(url):
     return urljoin(url, urlparse(url).path)
+
+
 # Function to make a request with a specified timeout and handle exceptions
 def make_request(url, timeout=10):
     try:
         response = requests.get(url, headers=cfg.user_agent_header, timeout=timeout)
         response.raise_for_status()
         return response
     except requests.exceptions.RequestException as e:
         return "Error: " + str(e)
+
+
 # Define and check for local file address prefixes
 def check_local_file_access(url):
     local_prefixes = ['file:///', 'file://localhost', 'http://localhost', 'https://localhost']
     return any(url.startswith(prefix) for prefix in local_prefixes)
+
+
 def scrape_text(url):
     """Scrape text from a webpage"""
     # Basic check if the URL is valid
     if not url.startswith('http'):
         return "Error: Invalid URL"
+
+
 def get_response(url, headers=cfg.user_agent_header, timeout=10):
     try:
         # Restrict access to local files

View File

@@ -3,6 +3,8 @@ from config import Config
 cfg = Config()
 from llm_utils import create_chat_completion
+
+
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=None):

View File

@@ -9,6 +9,7 @@ import logging
 cfg = Config()
 
+
 def create_chat_message(role, content):
     """
     Create a chat message with the given role and content.

View File

@@ -24,6 +24,7 @@ def is_valid_int(value):
     except ValueError:
         return False
 
+
 def get_command(response):
     """Parse the response and return the command name and arguments"""
     try:
@@ -135,6 +136,7 @@ def google_search(query, num_results=8):
     return json.dumps(search_results, ensure_ascii=False, indent=4)
 
+
 def google_official_search(query, num_results=8):
     """Return the results of a google search using the official Google API"""
     from googleapiclient.discovery import build
@@ -171,6 +173,7 @@ def google_official_search(query, num_results=8):
     # Return the list of search result URLs
     return search_results_links
 
+
 def browse_website(url, question):
     """Browse a website and return the summary and links"""
     summary = get_text_summary(url, question)

View File

@@ -1,6 +1,7 @@
 import os
 from pathlib import Path
 
+
 def load_prompt():
     """Load the prompt from data/prompt.txt"""
     try:

View File

@@ -65,6 +65,7 @@ def delete_file(filename):
     except Exception as e:
         return "Error: " + str(e)
 
+
 def search_files(directory):
     found_files = []

View File

@@ -11,6 +11,7 @@ cfg = Config()
 working_directory = "auto_gpt_workspace"
 
+
 def generate_image(prompt):
     filename = str(uuid.uuid4()) + ".jpg"

View File

@@ -4,6 +4,7 @@ cfg = Config()
 openai.api_key = cfg.openai_api_key
 
+
 # Overly simple abstraction until we create something better
 def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
     """Create a chat completion using the OpenAI API"""

View File

@@ -151,6 +151,7 @@ class TypingConsoleHandler(logging.StreamHandler):
         except Exception:
             self.handleError(record)
 
+
 class ConsoleHandler(logging.StreamHandler):
     def emit(self, record):
         msg = self.format(record)

View File

@@ -20,6 +20,7 @@ import logging
 cfg = Config()
 
+
 def check_openai_api_key():
     """Check if the OpenAI API key is set in config.py or as an environment variable."""
     if not cfg.openai_api_key:
@@ -30,6 +31,7 @@ def check_openai_api_key():
         print("You can get your key from https://beta.openai.com/account/api-keys")
         exit(1)
 
+
 def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     if cfg.speak_mode and cfg.debug_mode:
         speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
@@ -58,6 +60,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     return json_string
 
+
 def print_assistant_thoughts(assistant_reply):
     """Prints the assistant's thoughts to the console"""
     global ai_name
@@ -262,6 +265,7 @@ def prompt_user():
     config = AIConfig(ai_name, ai_role, ai_goals)
     return config
 
+
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg

View File

@@ -18,6 +18,7 @@ except ImportError:
     print("Pinecone not installed. Skipping import.")
     PineconeMemory = None
 
+
 def get_memory(cfg, init=False):
     memory = None
     if cfg.memory_backend == "pinecone":
@@ -41,6 +42,7 @@ def get_memory(cfg, init=False):
         memory.clear()
     return memory
 
+
 def get_supported_memory_backends():
     return supported_memory

View File

@@ -31,6 +31,7 @@ tts_headers = {
 mutex_lock = Lock() # Ensure only one sound is played at a time
 queue_semaphore = Semaphore(1) # The amount of sounds to queue before blocking the main thread
 
+
 def eleven_labs_speech(text, voice_index=0):
     """Speak text using elevenlabs.io's API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -51,6 +52,7 @@ def eleven_labs_speech(text, voice_index=0):
         print("Response content:", response.content)
         return False
 
+
 def gtts_speech(text):
     tts = gtts.gTTS(text)
     with mutex_lock:
@@ -58,6 +60,7 @@ def gtts_speech(text):
         playsound("speech.mp3", True)
         os.remove("speech.mp3")
 
+
 def macos_tts_speech(text, voice_index=0):
     if voice_index == 0:
         os.system(f'say "{text}"')
@@ -67,6 +70,7 @@ def macos_tts_speech(text, voice_index=0):
     else:
         os.system(f'say -v Samantha "{text}"')
 
+
 def say_text(text, voice_index=0):
     def speak():

View File

@@ -1,6 +1,7 @@
 import tiktoken
 from typing import List, Dict
 
+
 def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
     """
     Returns the number of tokens used by a list of messages.
@@ -41,6 +42,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
     num_tokens += 3 # every reply is primed with <|start|>assistant<|message|>
     return num_tokens
 
+
 def count_string_tokens(string: str, model_name: str) -> int:
     """
     Returns the number of tokens in a text string.

View File

@@ -8,6 +8,7 @@ sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
 from config import Config
 from memory.local import LocalCache
 
+
 class TestLocalCache(unittest.TestCase):
     def random_string(self, length):

View File

@@ -4,6 +4,7 @@ import sys
 sys.path.append(os.path.abspath('../scripts'))
 from memory.local import LocalCache
 
+
 def MockConfig():
     return type('MockConfig', (object,), {
         'debug_mode': False,
@@ -12,6 +13,7 @@ def MockConfig():
         'memory_index': 'auto-gpt',
     })
 
+
 class TestLocalCache(unittest.TestCase):
     def setUp(self):

View File

@@ -1,6 +1,7 @@
 import unittest
 from scripts.config import Config
 
+
 class TestConfig(unittest.TestCase):
     def test_singleton(self):

View File

@@ -3,6 +3,7 @@ import tests.context
 from scripts.json_parser import fix_and_parse_json
 
+
 class TestParseJson(unittest.TestCase):
     def test_valid_json(self):
         # Test that a valid JSON string is parsed correctly

View File

@@ -5,6 +5,7 @@ import sys
 sys.path.append(os.path.abspath('../scripts'))
 from json_parser import fix_and_parse_json
 
+
 class TestParseJson(unittest.TestCase):
     def test_valid_json(self):
         # Test that a valid JSON string is parsed correctly