Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-18 22:44:21 +01:00)
* Move/rename module `agent` -> `agents`
* WIP: abstract agent structure into base class and port Agent
* Move command arg path sanitization to decorator
* Add fallback token limit in llm.utils.create_chat_completion
* Rebase `MessageHistory` class on `ChatSequence` class
* Fix linting
* Consolidate logging modules
* Wham Bam Boom
* Fix tests & linting complaints
* Update Agent class docstring
* Fix Agent import in autogpt.llm.providers.openai
* Fix agent kwarg in test_execute_code.py
* Fix benchmarks.py
* Clean up lingering Agent(ai_name=...) initializations
* Fix agent kwarg
* Make sanitize_path_arg decorator more robust
* Fix linting
* Fix command-enabling lambdas
* Use relative paths in file ops logger
* Fix test_execute_python_file_not_found
* Fix Config model validation breaking on .plugins
* Define validator for Config.plugins
* Fix Config model issues
* Fix agent iteration budget in testing
* Fix declaration of context_while_think
* Fix Agent.parse_and_process_response signature
* Fix Agent cycle_budget usages
* Fix budget checking in BaseAgent.__next__
* Fix cycle budget initialization
* Fix function calling in BaseAgent.think()
* Include functions in token length calculation
* Fix Config errors
* Add debug thing to patched_api_requestor to investigate HTTP 400 errors
* If this works I'm gonna be sad
* Fix BaseAgent cycle budget logic and document attributes
* Document attributes on `Agent`
* Fix import issues between Agent and MessageHistory
* Improve typing
* Extract application code from the agent (#4982)
* Extract application code from the agent
* Wrap interaction loop in a function and call in benchmarks
* Forgot the important function call
* Add docstrings and inline comments to run loop
* Update typing and docstrings in agent
* Docstring formatting
* Separate prompt construction from on_before_think
* Use `self.default_cycle_instruction` in `Agent.think()`
* Fix formatting
* hot fix the SIGINT handler (#4997)
The signal handler in autogpt/main.py doesn't work properly because of
the clean_input(...) function; this commit remedies that. The issue is
discussed in
3966cdfd69 (r1264278776)
* Update the sigint handler to be smart enough to actually work (#4999)
* Update the sigint handler to be smart enough to actually work
* Update autogpt/main.py
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
* Can still use context manager
* Merge in upstream
---------
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
* Fix CI
* Fix initial prompt construction
* Fix off-by-one error
* Allow exit/EXIT to shut down app
* Remove dead code
---------
Co-authored-by: collijk <collijk@uw.edu>
Co-authored-by: Cyrus <39694513+cyrus-hawk@users.noreply.github.com>
186 lines
7.2 KiB
Python
import os
import re

import requests
import yaml
from colorama import Fore, Style
from git.repo import Repo
from prompt_toolkit import ANSI, PromptSession
from prompt_toolkit.history import InMemoryHistory

from autogpt.config import Config
from autogpt.logs import logger

session = PromptSession(history=InMemoryHistory())


def batch(iterable, max_batch_length: int, overlap: int = 0):
    """Batch data from iterable into slices of length max_batch_length.

    The last batch may be shorter.
    """
    # batched('ABCDEFG', 3) --> ABC DEF G
    if max_batch_length < 1:
        raise ValueError("max_batch_length must be at least one")
    # NOTE: overlap must be smaller than max_batch_length, or the step of the
    # range below becomes zero or negative.
    for i in range(0, len(iterable), max_batch_length - overlap):
        yield iterable[i : i + max_batch_length]
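

# Usage sketch (illustrative, not part of the original module): with a positive
# `overlap`, consecutive slices share items, which is useful when chunked text
# must keep some context across boundaries.
#
#   >>> list(batch("ABCDEFG", 3))
#   ['ABC', 'DEF', 'G']
#   >>> list(batch("ABCDEFG", 3, overlap=1))
#   ['ABC', 'CDE', 'EFG', 'G']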


def clean_input(config: Config, prompt: str = "", talk=False):
    try:
        if config.chat_messages_enabled:
            for plugin in config.plugins:
                if not hasattr(plugin, "can_handle_user_input"):
                    continue
                if not plugin.can_handle_user_input(user_input=prompt):
                    continue
                plugin_response = plugin.user_input(user_input=prompt)
                if not plugin_response:
                    continue
                if plugin_response.lower() in [
                    "yes",
                    "yeah",
                    "y",
                    "ok",
                    "okay",
                    "sure",
                    "alright",
                ]:
                    return config.authorise_key
                elif plugin_response.lower() in [
                    "no",
                    "nope",
                    "n",
                    "negative",
                ]:
                    return config.exit_key
                return plugin_response

        # Ask for input; the default when the user just presses Enter is "y"
        logger.info("Asking user via keyboard...")

        # handle_sigint must be set to False so that the signal handler in
        # autogpt/main.py can take effect. This refers to
        # https://github.com/Significant-Gravitas/Auto-GPT/pull/4799/files/3966cdfd694c2a80c0333823c3bc3da090f85ed3#r1264278776
        answer = session.prompt(ANSI(prompt), handle_sigint=False)
        return answer
    except KeyboardInterrupt:
        logger.info("You interrupted Auto-GPT")
        logger.info("Quitting...")
        exit(0)
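

# Illustrative sketch (hypothetical class, not part of the original module):
# a plugin that answers prompts on the user's behalf implements the two hooks
# that clean_input() probes for above.
class AutoConfirmPlugin:
    """Toy plugin that auto-confirms yes/no authorisation prompts."""

    def can_handle_user_input(self, user_input: str) -> bool:
        # Only step in when the prompt looks like a confirmation question.
        return "y/n" in user_input.lower()

    def user_input(self, user_input: str) -> str:
        # An affirmative answer makes clean_input() return
        # config.authorise_key, as if the user had typed "y".
        return "yes"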


def validate_yaml_file(file: str):
    try:
        with open(file, encoding="utf-8") as fp:
            yaml.load(fp.read(), Loader=yaml.FullLoader)
    except FileNotFoundError:
        return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
    except yaml.YAMLError as e:
        return (
            False,
            f"There was an issue while trying to read your AI Settings file: {e}",
        )

    return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
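

# Usage sketch (illustrative; the file name is an assumption): callers unpack
# the (valid, message) tuple and decide whether to proceed.
#
#   validated, message = validate_yaml_file("ai_settings.yaml")
#   if not validated:
#       logger.info(message)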


def readable_file_size(size, decimal_places=2):
    """Converts the given size in bytes to a readable format.

    Args:
        size: Size in bytes
        decimal_places (int): Number of decimal places to display
    """
    for unit in ["B", "KB", "MB", "GB", "TB"]:
        if size < 1024.0:
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"
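

# Quick sanity check (illustrative):
#
#   >>> readable_file_size(1536)
#   '1.50 KB'
#   >>> readable_file_size(7.2 * 1024, decimal_places=1)
#   '7.2 KB'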


def get_bulletin_from_web():
    """Fetch the latest bulletin from the Auto-GPT repo; return "" on failure."""
    try:
        response = requests.get(
            "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md"
        )
        if response.status_code == 200:
            return response.text
    except requests.exceptions.RequestException:
        pass

    return ""


def get_current_git_branch() -> str:
    try:
        repo = Repo(search_parent_directories=True)
        branch = repo.active_branch
        return branch.name
    except Exception:
        # Not inside a git repository, or HEAD is detached
        return ""
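

# Usage sketch (illustrative; the branch-name policy is an assumption): a
# launcher can warn when Auto-GPT is running on a non-stable branch.
#
#   if (branch := get_current_git_branch()) and branch != "stable":
#       logger.info(f"You are running on `{branch}` branch...")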


def get_latest_bulletin() -> tuple[str, bool]:
    exists = os.path.exists("data/CURRENT_BULLETIN.md")
    current_bulletin = ""
    if exists:
        with open("data/CURRENT_BULLETIN.md", "r", encoding="utf-8") as f:
            current_bulletin = f.read()
    new_bulletin = get_bulletin_from_web()
    is_new_news = new_bulletin != "" and new_bulletin != current_bulletin

    news_header = Fore.YELLOW + "Welcome to Auto-GPT!\n"
    if new_bulletin or current_bulletin:
        news_header += (
            "Below you'll find the latest Auto-GPT News and updates regarding features!\n"
            "If you don't wish to see this message, you "
            "can run Auto-GPT with the *--skip-news* flag.\n"
        )

    if new_bulletin and is_new_news:
        with open("data/CURRENT_BULLETIN.md", "w", encoding="utf-8") as f:
            f.write(new_bulletin)
        current_bulletin = f"{Fore.RED}::NEW BULLETIN::{Fore.RESET}\n\n{new_bulletin}"

    return f"{news_header}\n{current_bulletin}", is_new_news
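

# Usage sketch (illustrative): a launcher would print the bulletin once per
# new edition, converting its markdown to ANSI for terminal display.
#
#   motd, is_new_motd = get_latest_bulletin()
#   if motd:
#       print(markdown_to_ansi_style(motd))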


def markdown_to_ansi_style(markdown: str):
    """Convert a small subset of markdown (headings, *emphasis*) to ANSI styling."""
    ansi_lines: list[str] = []
    for line in markdown.split("\n"):
        line_style = ""

        if line.startswith("# "):
            line_style += Style.BRIGHT
        else:
            line = re.sub(
                r"(?<!\*)\*(\*?[^*]+\*?)\*(?!\*)",
                rf"{Style.BRIGHT}\1{Style.NORMAL}",
                line,
            )

        if re.match(r"^#+ ", line) is not None:
            line_style += Fore.CYAN
            line = re.sub(r"^#+ ", "", line)

        ansi_lines.append(f"{line_style}{line}{Style.RESET_ALL}")
    return "\n".join(ansi_lines)
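

# Illustrative example: a "# " heading is rendered bright cyan with the marker
# stripped, and single-*starred* spans are rendered bright.
#
#   print(markdown_to_ansi_style("# Hello\nThis is *important*."))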


def get_legal_warning() -> str:
    legal_text = """
## DISCLAIMER AND INDEMNIFICATION AGREEMENT
### PLEASE READ THIS DISCLAIMER AND INDEMNIFICATION AGREEMENT CAREFULLY BEFORE USING THE AUTOGPT SYSTEM. BY USING THE AUTOGPT SYSTEM, YOU AGREE TO BE BOUND BY THIS AGREEMENT.

## Introduction
AutoGPT (the "System") is a project that connects a GPT-like artificial intelligence system to the internet and allows it to automate tasks. While the System is designed to be useful and efficient, there may be instances where the System could perform actions that may cause harm or have unintended consequences.

## No Liability for Actions of the System
The developers, contributors, and maintainers of the AutoGPT project (collectively, the "Project Parties") make no warranties or representations, express or implied, about the System's performance, accuracy, reliability, or safety. By using the System, you understand and agree that the Project Parties shall not be liable for any actions taken by the System or any consequences resulting from such actions.

## User Responsibility and Respondeat Superior Liability
As a user of the System, you are responsible for supervising and monitoring the actions of the System while it is operating on your behalf. You acknowledge that using the System could expose you to potential liability including but not limited to respondeat superior and you agree to assume all risks and liabilities associated with such potential liability.

## Indemnification
By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences.
    """
    return legal_text