Merge pull request #1014 from drikusroor/fix-flake8-issues-pt-2

Fix flake8 issues pt. 2 (Add E231 & E302 flake8 rules)
Richard Beales
2023-04-13 17:56:06 +01:00
committed by GitHub
24 changed files with 47 additions and 14 deletions
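For context on the two rules this PR adds to the lint selection: E231 flags missing whitespace after a comma or colon, and E302 requires two blank lines before a top-level function or class definition. A minimal before/after sketch (hypothetical code, not from this repository):

```
# Before: flake8 reports E231 (missing whitespace after ':') on the
# dict literal, and E302 (expected 2 blank lines, got 0) at the def.
headers = {"User-Agent":"Mozilla/5.0"}
def fetch_headers():
    return headers


# After: a space follows the colon, and two blank lines separate the
# module-level statement from the function definition.
headers = {"User-Agent": "Mozilla/5.0"}


def fetch_headers():
    return headers
```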

View File

@@ -32,7 +32,7 @@ jobs:
       - name: Lint with flake8
        continue-on-error: false
-        run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
+        run: flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
      - name: Run unittest tests with coverage
        run: |

View File

@@ -348,11 +348,13 @@ coverage run -m unittest discover tests
 ## Run linter
-This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. To run the linter, run the following command:
+This project uses [flake8](https://flake8.pycqa.org/en/latest/) for linting. We currently use the following rules: `E303,W293,W291,W292,E305,E231,E302`. See the [flake8 rules](https://www.flake8rules.com/) for more information.
+
+To run the linter, run the following command:
 ```
 flake8 scripts/ tests/
 # Or, if you want to run flake8 with the same configuration as the CI:
-flake8 scripts/ tests/ --select E303,W293,W291,W292,E305
+flake8 scripts/ tests/ --select E303,W293,W291,W292,E305,E231,E302
 ```
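If the `--select` list keeps growing, the same selection could be centralized in a flake8 configuration file that both the CI step and local runs pick up automatically. A minimal sketch, assuming a `setup.cfg` at the repository root (this PR does not add one):

```
[flake8]
select = E303,W293,W291,W292,E305,E231,E302
```

With this in place, a plain `flake8 scripts/ tests/` applies the same rules as the CI without repeating the flag.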

View File

@@ -6,6 +6,7 @@ agents = {}  # key, (task, full_message_history, model)

+
 # Create new GPT agent
 # TODO: Centralise use of create_chat_completion() to globally enforce token limit
 def create_agent(task, prompt, model):
     """Create a new agent and return its key"""
     global next_key

View File

@@ -2,6 +2,7 @@ import yaml
 import data
 import os

+
 class AIConfig:
     """
     A class object that contains the configuration information for the AI

View File

@@ -3,6 +3,8 @@ from config import Config
 cfg = Config()
 from llm_utils import create_chat_completion
+
+
 # This is a magic function that can do anything with no-code. See
 # https://github.com/Torantulino/AI-Functions for more info.
 def call_ai_function(function, args, description, model=None):

View File

@@ -9,6 +9,7 @@ import logging
 cfg = Config()

+
 def create_chat_message(role, content):
     """
     Create a chat message with the given role and content.

View File

@@ -24,6 +24,7 @@ def is_valid_int(value):
     except ValueError:
         return False

+
 def get_command(response):
     """Parse the response and return the command name and arguments"""
     try:
@@ -135,6 +136,7 @@ def google_search(query, num_results=8):
     return json.dumps(search_results, ensure_ascii=False, indent=4)

+
 def google_official_search(query, num_results=8):
     """Return the results of a google search using the official Google API"""
     from googleapiclient.discovery import build
@@ -171,6 +173,7 @@ def google_official_search(query, num_results=8):
     # Return the list of search result URLs
     return search_results_links

+
 def browse_website(url, question):
     """Browse a website and return the summary and links"""
     summary = get_text_summary(url, question)

View File

@@ -73,7 +73,7 @@ class Config(metaclass=Singleton):
         # User agent headers to use when browsing web
         # Some websites might just completely deny request with an error code if no user agent was found.
-        self.user_agent_header = {"User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
+        self.user_agent_header = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"}
         self.redis_host = os.getenv("REDIS_HOST", "localhost")
         self.redis_port = os.getenv("REDIS_PORT", "6379")
         self.redis_password = os.getenv("REDIS_PASSWORD", "")

View File

@@ -1,6 +1,7 @@
 import os
 from pathlib import Path

+
 def load_prompt():
     """Load the prompt from data/prompt.txt"""
     try:

View File

@@ -67,6 +67,7 @@ def execute_python_file(file):
     except Exception as e:
         return f"Error: {str(e)}"

+
 def execute_shell(command_line):
     current_dir = os.getcwd()

View File

@@ -65,6 +65,7 @@ def delete_file(filename):
     except Exception as e:
         return "Error: " + str(e)

+
 def search_files(directory):
     found_files = []

View File

@@ -11,6 +11,7 @@ cfg = Config()
 working_directory = "auto_gpt_workspace"

+
 def generate_image(prompt):
     filename = str(uuid.uuid4()) + ".jpg"

View File

@@ -4,6 +4,7 @@ cfg = Config()
 openai.api_key = cfg.openai_api_key

+
 # Overly simple abstraction until we create something better
 def create_chat_completion(messages, model=None, temperature=cfg.temperature, max_tokens=None)->str:
     """Create a chat completion using the OpenAI API"""

View File

@@ -157,6 +157,7 @@ class TypingConsoleHandler(logging.StreamHandler):
         except Exception:
             self.handleError(record)

+
 class ConsoleHandler(logging.StreamHandler):
     def emit(self, record):
         msg = self.format(record)
@@ -166,11 +167,11 @@ class ConsoleHandler(logging.StreamHandler):
             self.handleError(record)


-'''
-Allows to handle custom placeholders 'title_color' and 'message_no_color'.
-To use this formatter, make sure to pass 'color', 'title' as log extras.
-'''
 class AutoGptFormatter(logging.Formatter):
+    """
+    Allows to handle custom placeholders 'title_color' and 'message_no_color'.
+    To use this formatter, make sure to pass 'color', 'title' as log extras.
+    """
     def format(self, record: LogRecord) -> str:
         if (hasattr(record, 'color')):
             record.title_color = getattr(record, 'color') + getattr(record, 'title') + " " + Style.RESET_ALL

View File

@@ -20,6 +20,7 @@ import logging
 cfg = Config()

+
 def check_openai_api_key():
     """Check if the OpenAI API key is set in config.py or as an environment variable."""
     if not cfg.openai_api_key:
@@ -30,6 +31,7 @@ def check_openai_api_key():
         print("You can get your key from https://beta.openai.com/account/api-keys")
         exit(1)

+
 def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     if cfg.speak_mode and cfg.debug_mode:
         speak.say_text("I have received an invalid JSON response from the OpenAI API. Trying to fix it now.")
@@ -58,6 +60,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
     return json_string

+
 def print_assistant_thoughts(assistant_reply):
     """Prints the assistant's thoughts to the console"""
     global ai_name
@@ -262,6 +265,7 @@ def prompt_user():
     config = AIConfig(ai_name, ai_role, ai_goals)
     return config

+
 def parse_arguments():
     """Parses the arguments passed to the script"""
     global cfg

View File

@@ -19,6 +19,7 @@ except ImportError:
     print("Pinecone not installed. Skipping import.")
     PineconeMemory = None

+
 def get_memory(cfg, init=False):
     memory = None
     if cfg.memory_backend == "pinecone":
@@ -44,6 +45,7 @@ def get_memory(cfg, init=False):
         memory.clear()
     return memory

+
 def get_supported_memory_backends():
     return supported_memory

View File

@@ -3,7 +3,6 @@ import abc
 from config import AbstractSingleton, Config
 import openai

-cfg = Config()

 def get_ada_embedding(text):
     text = text.replace("\n", " ")

View File

@@ -31,6 +31,7 @@ tts_headers = {
 mutex_lock = Lock()  # Ensure only one sound is played at a time
 queue_semaphore = Semaphore(1)  # The amount of sounds to queue before blocking the main thread

+
 def eleven_labs_speech(text, voice_index=0):
     """Speak text using elevenlabs.io's API"""
     tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
@@ -51,6 +52,7 @@ def eleven_labs_speech(text, voice_index=0):
         print("Response content:", response.content)
         return False

+
 def gtts_speech(text):
     tts = gtts.gTTS(text)
     with mutex_lock:
@@ -58,6 +60,7 @@ def gtts_speech(text):
         playsound("speech.mp3", True)
         os.remove("speech.mp3")

+
 def macos_tts_speech(text, voice_index=0):
     if voice_index == 0:
         os.system(f'say "{text}"')
@@ -67,6 +70,7 @@ def macos_tts_speech(text, voice_index=0):
     else:
         os.system(f'say -v Samantha "{text}"')

+
 def say_text(text, voice_index=0):
     def speak():

View File

@@ -1,6 +1,7 @@
 import tiktoken
 from typing import List, Dict

+
 def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
     """
     Returns the number of tokens used by a list of messages.
@@ -41,6 +42,7 @@ def count_message_tokens(messages : List[Dict[str, str]], model : str = "gpt-3.5-turbo-0301") -> int:
         num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
     return num_tokens

+
 def count_string_tokens(string: str, model_name: str) -> int:
     """
     Returns the number of tokens in a text string.

View File

@@ -8,6 +8,7 @@ sys.path.append(str(Path(__file__).resolve().parent.parent.parent / 'scripts'))
 from config import Config
 from memory.local import LocalCache

+
 class TestLocalCache(unittest.TestCase):
     def random_string(self, length):

View File

@@ -4,6 +4,7 @@ import sys
 sys.path.append(os.path.abspath('../scripts'))
 from memory.local import LocalCache

+
 def MockConfig():
     return type('MockConfig', (object,), {
         'debug_mode': False,
@@ -12,6 +13,7 @@ def MockConfig():
         'memory_index': 'auto-gpt',
     })

+
 class TestLocalCache(unittest.TestCase):
     def setUp(self):

View File

@@ -1,6 +1,7 @@
 import unittest
 from scripts.config import Config

+
 class TestConfig(unittest.TestCase):
     def test_singleton(self):

View File

@@ -3,6 +3,7 @@ import tests.context
 from scripts.json_parser import fix_and_parse_json

+
 class TestParseJson(unittest.TestCase):
     def test_valid_json(self):
         # Test that a valid JSON string is parsed correctly
@@ -52,7 +53,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },
@@ -91,7 +92,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },

View File

@@ -5,6 +5,7 @@ import sys
 sys.path.append(os.path.abspath('../scripts'))
 from json_parser import fix_and_parse_json

+
 class TestParseJson(unittest.TestCase):
     def test_valid_json(self):
         # Test that a valid JSON string is parsed correctly
@@ -52,7 +53,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },
@@ -91,7 +92,7 @@ class TestParseJson(unittest.TestCase):
         good_obj = {
             "command": {
                 "name": "browse_website",
-                "args":{
+                "args": {
                     "url": "https://github.com/Torantulino/Auto-GPT"
                 }
             },