Mirror of https://github.com/aljazceru/Auto-GPT.git
isort, add proper skips.
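The hunks below show isort's reformatting of the import blocks: duplicate from-imports are merged, import groups are reordered, and long import lines are rewrapped; the "proper skips" of the commit title refers to the skip list added to [tool.isort] in pyproject.toml near the end of the diff. As a minimal illustration (not part of the commit), isort's Python API reproduces the merge seen in the agent manager hunk; isort.code() sorts a string of source in isort 5:

import isort

# Two from-imports of the same module are combined into one sorted statement,
# as in the "from typing import List, Union" change below.
merged = isort.code("from typing import Union\nfrom typing import List\n")
print(merged)  # -> "from typing import List, Union\n"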
@@ -3,7 +3,8 @@ from colorama import Fore, Style
from autogpt.app import execute_command, get_command
from autogpt.chat import chat_with_ai, create_chat_message
from autogpt.config import Config
from autogpt.json_fixes.master_json_fix_method import fix_json_using_multiple_techniques
from autogpt.json_fixes.master_json_fix_method import \
fix_json_using_multiple_techniques
from autogpt.json_validation.validate_json import validate_json
from autogpt.logs import logger, print_assistant_thoughts
from autogpt.speech import say_text
@@ -1,9 +1,7 @@
"""Agent manager for managing GPT agents"""
from __future__ import annotations
from typing import List

from typing import Union

from typing import List, Union

from autogpt.config.config import Config, Singleton
from autogpt.llm_utils import create_chat_completion
@@ -6,22 +6,14 @@ from autogpt.agent.agent_manager import AgentManager
from autogpt.commands.audio_text import read_audio_from_file
from autogpt.commands.command import CommandRegistry, command
from autogpt.commands.evaluate_code import evaluate_code
from autogpt.commands.execute_code import (
execute_python_file,
execute_shell,
execute_shell_popen,
)
from autogpt.commands.file_operations import (
append_to_file,
delete_file,
download_file,
read_file,
search_files,
write_to_file,
download_file,
)
from autogpt.commands.execute_code import (execute_python_file, execute_shell,
execute_shell_popen)
from autogpt.commands.file_operations import (append_to_file, delete_file,
download_file, read_file,
search_files, write_to_file)
from autogpt.commands.git_operations import clone_repository
from autogpt.commands.google_search import google_official_search, google_search
from autogpt.commands.google_search import (google_official_search,
google_search)
from autogpt.commands.image_gen import generate_image
from autogpt.commands.improve_code import improve_code
from autogpt.commands.twitter import send_tweet
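Besides rewrapping the parenthesised imports into isort's grid style, the hunk above drops the download_file name that was listed twice in the old file_operations block; isort de-duplicates repeated names when it rebuilds a from-import. A small sketch of that behaviour (not part of the commit; default isort settings assumed):

import isort

# The duplicated download_file collapses to a single, alphabetically sorted name.
cleaned = isort.code(
    "from autogpt.commands.file_operations import read_file, download_file, download_file\n"
)
print(cleaned)  # -> "from autogpt.commands.file_operations import download_file, read_file\n"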
@@ -74,15 +74,14 @@ def main(
from colorama import Fore

from autogpt.agent.agent import Agent

from autogpt.commands.command import CommandRegistry
from autogpt.config import Config, check_openai_api_key
from autogpt.configurator import create_config
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import construct_main_ai_config
from autogpt.utils import get_latest_bulletin
from autogpt.plugins import scan_plugins
if ctx.invoked_subcommand is None:
cfg = Config()
# TODO: fill in llm values here
@@ -10,13 +10,11 @@ import requests
from colorama import Back, Fore
from requests.adapters import HTTPAdapter, Retry

from autogpt.commands.command import command
from autogpt.spinner import Spinner
from autogpt.utils import readable_file_size

from autogpt.commands.command import command
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace


LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE
@@ -17,8 +17,8 @@ from selenium.webdriver.support.wait import WebDriverWait
from webdriver_manager.chrome import ChromeDriverManager
from webdriver_manager.firefox import GeckoDriverManager

from autogpt.commands.command import command
import autogpt.processing.text as summary
from autogpt.commands.command import command
from autogpt.config import Config
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
@@ -1,10 +1,10 @@
"""Configuration class to store the state of bools for different scripts access."""
import os

from typing import List

import openai
from auto_gpt_plugin_template import AutoGPTPluginTemplate
import yaml
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from colorama import Fore
from dotenv import load_dotenv
@@ -9,9 +9,7 @@ CFG = Config()

def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
from autogpt.json_fixes.parsing import (
attempt_to_fix_json_by_finding_outermost_brackets,
fix_and_parse_json,
)
attempt_to_fix_json_by_finding_outermost_brackets, fix_and_parse_json)

# Parse and print Assistant response
assistant_reply_json = fix_and_parse_json(assistant_reply)
@@ -8,8 +8,8 @@ from colorama import Fore, Style
from openai.error import APIError, RateLimitError

from autogpt.config import Config
from autogpt.types.openai import Message
from autogpt.logs import logger
from autogpt.types.openai import Message

CFG = Config()
@@ -204,9 +204,8 @@ logger = Logger()

def print_assistant_thoughts(ai_name, assistant_reply):
"""Prints the assistant's thoughts to the console"""
from autogpt.json_fixes.bracket_termination import (
attempt_to_fix_json_by_finding_outermost_brackets,
)
from autogpt.json_fixes.bracket_termination import \
attempt_to_fix_json_by_finding_outermost_brackets
from autogpt.json_fixes.parsing import fix_and_parse_json

try:
@@ -1,5 +1,6 @@
""" Milvus memory storage provider."""
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
from pymilvus import (Collection, CollectionSchema, DataType, FieldSchema,
connections)

from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
@@ -1,6 +1,5 @@
"""Handles loading of plugins."""
from typing import Any, Dict, List, Optional, Tuple, TypedDict
from typing import TypeVar
from typing import Any, Dict, List, Optional, Tuple, TypedDict, TypeVar

from auto_gpt_plugin_template import AutoGPTPluginTemplate
@@ -4,20 +4,19 @@ import importlib
import json
import os
import zipfile
import openapi_python_client
import requests

from pathlib import Path
from typing import List, Tuple, Optional
from typing import List, Optional, Tuple
from urllib.parse import urlparse
from zipimport import zipimporter

import openapi_python_client
import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openapi_python_client.cli import Config as OpenAPIConfig

from autogpt.config import Config
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin

from auto_gpt_plugin_template import AutoGPTPluginTemplate


def inspect_zip_for_module(zip_path: str, debug: bool = False) -> Optional[str]:
"""
@@ -1,5 +1,6 @@
"""Functions for counting the number of tokens in a message or string."""
from __future__ import annotations

from typing import List

import tiktoken
@@ -14,6 +14,7 @@ extend-exclude = '.+/(dist|.venv|venv|build)/.+'

[tool.isort]
profile = "black"
skip = venv,env,node_modules,.env,.venv,dist
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
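The [tool.isort] block above combines the black profile with an explicit skip list. Note that pyproject.toml is TOML, so the bare skip = venv,env,... value would normally need to be written as a quoted string or an array for isort to parse it; the sketch below (a rough equivalent, with a placeholder file path) expresses the same settings through isort's Python API:

import isort

# Roughly the settings from the [tool.isort] block; profile="black" already
# implies multi_line_output = 3, include_trailing_comma and force_grid_wrap = 0.
config = isort.Config(
    profile="black",
    skip=frozenset({"venv", "env", "node_modules", ".env", ".venv", "dist"}),
)

# Sort a single file in place (placeholder path); the skip list mainly matters
# when isort is pointed at whole directories or run from the CLI/pre-commit.
isort.file("autogpt/app.py", config=config)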
@@ -1,5 +1,5 @@
import shutil
import os
import shutil
import sys
from pathlib import Path
@@ -1,6 +1,9 @@
import pytest
from typing import Any, Dict, List, Optional, Tuple
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin, Message, PromptGenerator

import pytest

from autogpt.models.base_open_ai_plugin import (BaseOpenAIPlugin, Message,
PromptGenerator)


class DummyPlugin(BaseOpenAIPlugin):
@@ -1,6 +1,8 @@
import pytest
from autogpt.plugins import inspect_zip_for_module, scan_plugins, blacklist_whitelist_check

from autogpt.config import Config
from autogpt.plugins import (blacklist_whitelist_check, inspect_zip_for_module,
scan_plugins)

PLUGINS_TEST_DIR = "tests/unit/data/test_plugins"
PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip"