Mirror of https://github.com/aljazceru/Auto-GPT.git · synced 2025-12-24 09:24:27 +01:00
fix conflicts
.github/PULL_REQUEST_TEMPLATE.md (vendored) · 7 changes
@@ -1,3 +1,10 @@
<!-- ⚠️ At the moment any non-essential commands are not being merged.
If you want to add non-essential commands to Auto-GPT, please create a plugin instead.
We are expecting to ship plugin support within the week (PR #757).
Resources:
* https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template
-->

<!-- 📢 Announcement
We've recently noticed an increase in pull requests focusing on combining multiple changes. While the intentions behind these PRs are appreciated, it's essential to maintain a clean and manageable git history. To ensure the quality of our repository, we kindly ask you to adhere to the following guidelines when submitting PRs:
.github/workflows/ci.yml (vendored) · 5 changes
@@ -6,7 +6,10 @@ on:
      - master
  pull_request:
    branches:
      - master
      - '**'
  pull_request_target:
    branches:
      - '**'

jobs:
  build:
@@ -20,6 +20,12 @@ This document provides guidelines and best practices to help you contribute effe
By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.

## 📢 A Quick Word
Right now we will not be accepting any Contributions that add non-essential commands to Auto-GPT.

However, you absolutely can still add these commands to Auto-GPT in the form of plugins. Please check out this [template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template).
> ⚠️ Plugin support is expected to ship within the week. You can follow PR #757 for more updates!

## Getting Started

To start contributing, follow these steps:
@@ -1,53 +1,5 @@
"""Main script for the autogpt package."""
import logging

from colorama import Fore

from autogpt.agent.agent import Agent
from autogpt.args import parse_arguments
from autogpt.config import Config, check_openai_api_key
from autogpt.logs import logger
from autogpt.memory import get_memory
from autogpt.prompt import construct_prompt

# Load environment variables from .env file


def main() -> None:
    """Main function for the script"""
    cfg = Config()
    # TODO: fill in llm values here
    check_openai_api_key()
    parse_arguments()
    logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
    ai_name = ""
    system_prompt = construct_prompt()
    # print(prompt)
    # Initialize variables
    full_message_history = []
    next_action_count = 0
    # Make a constant:
    triggering_prompt = (
        "Determine which next command to use, and respond using the"
        " format specified above:"
    )
    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(cfg, init=True)
    logger.typewriter_log(
        f"Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
    )
    logger.typewriter_log(f"Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
    agent = Agent(
        ai_name=ai_name,
        memory=memory,
        full_message_history=full_message_history,
        next_action_count=next_action_count,
        system_prompt=system_prompt,
        triggering_prompt=triggering_prompt,
    )
    agent.start_interaction_loop()


"""Auto-GPT: A GPT powered AI Assistant"""
import autogpt.cli

if __name__ == "__main__":
    main()
    autogpt.cli.main()
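The hunk above reduces the package's main script to a hand-off into the new Click-based CLI (autogpt/cli.py, added below), so the existing entry point keeps working while all option handling moves into one place. A minimal sketch of that shim pattern, using a hypothetical package name rather than the project's modules:

```python
# file: mypkg/__main__.py (hypothetical example mirroring the shape of the new entry module)
"""Run the package as a script; no startup logic lives here anymore."""
import mypkg.cli  # hypothetical CLI module exposing a Click command named `main`

if __name__ == "__main__":
    # All argument parsing and startup happens inside the Click command.
    mypkg.cli.main()
```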
autogpt/cli.py (new file) · 125 lines
@@ -0,0 +1,125 @@
"""Main script for the autogpt package."""
import click


@click.group(invoke_without_command=True)
@click.option("-c", "--continuous", is_flag=True, help="Enable Continuous Mode")
@click.option(
    "--skip-reprompt",
    "-y",
    is_flag=True,
    help="Skips the re-prompting messages at the beginning of the script",
)
@click.option(
    "--ai-settings",
    "-C",
    help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
)
@click.option(
    "-l",
    "--continuous-limit",
    type=int,
    help="Defines the number of times to run in continuous mode",
)
@click.option("--speak", is_flag=True, help="Enable Speak Mode")
@click.option("--debug", is_flag=True, help="Enable Debug Mode")
@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode")
@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode")
@click.option(
    "--use-memory",
    "-m",
    "memory_type",
    type=str,
    help="Defines which Memory backend to use",
)
@click.option(
    "-b",
    "--browser-name",
    help="Specifies which web-browser to use when using selenium to scrape the web.",
)
@click.option(
    "--allow-downloads",
    is_flag=True,
    help="Dangerous: Allows Auto-GPT to download files natively.",
)
@click.pass_context
def main(
    ctx: click.Context,
    continuous: bool,
    continuous_limit: int,
    ai_settings: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
) -> None:
    """
    Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI.

    Start an Auto-GPT assistant.
    """
    # Put imports inside function to avoid importing everything when starting the CLI
    import logging

    from colorama import Fore

    from autogpt.agent.agent import Agent
    from autogpt.config import Config, check_openai_api_key
    from autogpt.configurator import create_config
    from autogpt.logs import logger
    from autogpt.memory import get_memory
    from autogpt.prompt import construct_prompt

    if ctx.invoked_subcommand is None:
        cfg = Config()
        # TODO: fill in llm values here
        check_openai_api_key()
        create_config(
            continuous,
            continuous_limit,
            ai_settings,
            skip_reprompt,
            speak,
            debug,
            gpt3only,
            gpt4only,
            memory_type,
            browser_name,
            allow_downloads,
        )
        logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
        ai_name = ""
        system_prompt = construct_prompt()
        # print(prompt)
        # Initialize variables
        full_message_history = []
        next_action_count = 0
        # Make a constant:
        triggering_prompt = (
            "Determine which next command to use, and respond using the"
            " format specified above:"
        )
        # Initialize memory and make sure it is empty.
        # this is particularly important for indexing and referencing pinecone memory
        memory = get_memory(cfg, init=True)
        logger.typewriter_log(
            "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}"
        )
        logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser)
        agent = Agent(
            ai_name=ai_name,
            memory=memory,
            full_message_history=full_message_history,
            next_action_count=next_action_count,
            system_prompt=system_prompt,
            triggering_prompt=triggering_prompt,
        )
        agent.start_interaction_loop()


if __name__ == "__main__":
    main()
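The new cli.py leans on three Click mechanics: options declared with `is_flag=True` arrive as booleans, a bare third name such as `"memory_type"` renames the parameter the value is passed to, and `invoke_without_command=True` lets the group body run even when no subcommand is given. A small, runnable sketch of those mechanics (a hypothetical command, not part of Auto-GPT):

```python
import click


@click.group(invoke_without_command=True)
@click.option("-c", "--continuous", is_flag=True, help="Enable continuous mode")
@click.option("--use-memory", "-m", "memory_type", type=str, help="Memory backend to use")
@click.pass_context
def main(ctx: click.Context, continuous: bool, memory_type: str) -> None:
    """Group body: only acts when no subcommand was invoked."""
    if ctx.invoked_subcommand is None:
        # is_flag options come through as booleans; the bare "memory_type"
        # name above is why the value lands in this parameter.
        click.echo(f"continuous={continuous}, memory_type={memory_type}")


if __name__ == "__main__":
    main()
```

Running the sketch with `-c --use-memory redis` prints both values; if a subcommand were registered with `@main.command()` and invoked, the `ctx.invoked_subcommand` guard would keep the default startup from running.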
@@ -1,6 +1,5 @@
"""This module contains the argument parsing logic for the script."""
import argparse

"""Configurator module."""
import click
from colorama import Back, Fore, Style

from autogpt import utils
@@ -11,78 +10,44 @@ from autogpt.memory import get_supported_memory_backends
CFG = Config()


def parse_arguments() -> None:
    """Parses the arguments passed to the script
def create_config(
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
    skip_reprompt: bool,
    speak: bool,
    debug: bool,
    gpt3only: bool,
    gpt4only: bool,
    memory_type: str,
    browser_name: str,
    allow_downloads: bool,
) -> None:
    """Updates the config object with the given arguments.

    Args:
        continuous (bool): Whether to run in continuous mode
        continuous_limit (int): The number of times to run in continuous mode
        ai_settings_file (str): The path to the ai_settings.yaml file
        skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script
        speak (bool): Whether to enable speak mode
        debug (bool): Whether to enable debug mode
        gpt3only (bool): Whether to enable GPT3.5 only mode
        gpt4only (bool): Whether to enable GPT4 only mode
        memory_type (str): The type of memory backend to use
        browser_name (str): The name of the browser to use when using selenium to scrape the web
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively

    Returns:
        None
    """
    CFG.set_debug_mode(False)
    CFG.set_continuous_mode(False)
    CFG.set_speak_mode(False)

    parser = argparse.ArgumentParser(description="Process arguments.")
    parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")

    parser.add_argument(
        "--continuous", "-c", action="store_true", help="Enable Continuous Mode"
    )
    parser.add_argument(
        "--continuous-limit",
        "-l",
        type=int,
        dest="continuous_limit",
        help="Defines the number of times to run in continuous mode",
    )

    parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")

    parser.add_argument(
        "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
    )
    parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")

    parser.add_argument(
        "--use-memory",
        "-m",
        dest="memory_type",
        help="Defines which Memory backend to use",
    )

    parser.add_argument(
        "--skip-reprompt",
        "-y",
        dest="skip_reprompt",
        action="store_true",
        help="Skips the re-prompting messages at the beginning of the script",
    )

    parser.add_argument(
        "--ai-settings",
        "-C",
        dest="ai_settings_file",
        help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.",
    )

    parser.add_argument(
        "--use-browser",
        "-b",
        dest="browser_name",
        help="Specifies which web-browser to use when using selenium to scrape the web.",
    )
    parser.add_argument(
        "--allow-downloads",
        action="store_true",
        dest="allow_downloads",
        help="Dangerous: Allows Auto-GPT to download files natively.",
    )
    args = parser.parse_args()

    if args.debug:
    if debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_debug_mode(True)

    if args.continuous:
    if continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
        logger.typewriter_log(
            "WARNING: ",
@@ -93,30 +58,31 @@ def parse_arguments() -> None:
        )
        CFG.set_continuous_mode(True)

        if args.continuous_limit:
        if continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
                "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
            )
            CFG.set_continuous_limit(args.continuous_limit)
            CFG.set_continuous_limit(continuous_limit)

    # Check if continuous limit is used without continuous mode
    if args.continuous_limit and not args.continuous:
        parser.error("--continuous-limit can only be used with --continuous")
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")

    if args.speak:
    if speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_speak_mode(True)

    if args.gpt3only:
    if gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_smart_llm_model(CFG.fast_llm_model)
    if args.gpt4only:

    if gpt4only:
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
        CFG.set_fast_llm_model(CFG.smart_llm_model)

    if args.memory_type:
    if memory_type:
        supported_memory = get_supported_memory_backends()
        chosen = args.memory_type
        chosen = memory_type
        if chosen not in supported_memory:
            logger.typewriter_log(
                "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
@@ -127,12 +93,12 @@ def parse_arguments() -> None:
        else:
            CFG.memory_backend = chosen

    if args.skip_reprompt:
    if skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        CFG.skip_reprompt = True

    if args.ai_settings_file:
        file = args.ai_settings_file
    if ai_settings_file:
        file = ai_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
@@ -145,10 +111,10 @@ def parse_arguments() -> None:
        CFG.ai_settings_file = file
        CFG.skip_reprompt = True

    if args.browser_name:
        CFG.selenium_web_browser = args.browser_name
    if browser_name:
        CFG.selenium_web_browser = browser_name

    if args.allow_downloads:
    if allow_downloads:
        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
        logger.typewriter_log(
            "WARNING: ",
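Compared with the old parse_arguments(), create_config() no longer parses anything itself: the values already parsed by Click come in as plain parameters, and the function only maps them onto the shared config object, raising `click.UsageError` where parse_arguments() used `parser.error()`. A condensed sketch of that mapping pattern (hypothetical `Config` stand-in, not the project's class):

```python
import click


class Config:
    """Hypothetical stand-in for the shared configuration object."""

    def __init__(self) -> None:
        self.continuous_mode = False
        self.continuous_limit = 0

    def set_continuous_mode(self, value: bool) -> None:
        self.continuous_mode = value

    def set_continuous_limit(self, value: int) -> None:
        self.continuous_limit = value


CFG = Config()


def create_config(continuous: bool, continuous_limit: int) -> None:
    """Map already-parsed CLI values onto the shared config object."""
    CFG.set_continuous_mode(False)
    if continuous:
        CFG.set_continuous_mode(True)
        if continuous_limit:
            CFG.set_continuous_limit(continuous_limit)
    # Invalid flag combinations surface as Click usage errors rather than
    # argparse's parser.error().
    if continuous_limit and not continuous:
        raise click.UsageError("--continuous-limit can only be used with --continuous")
```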
@@ -17,6 +17,13 @@ def prompt_user() -> AIConfig:
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "run with '--help' for more information.",
        speak_text=True,
    )

    logger.typewriter_log(
        "Create an AI-Assistant:",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
@@ -19,6 +19,7 @@ selenium
webdriver-manager
jsonschema
tweepy
click

##Dev
coverage