Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-23 00:44:22 +01:00)
Pass Configs to Commands and remove CFG = Config() in the commands/ folder (#4328)
* feat: pass config to call_ai_functions in commands
* feat: config for read_audio_from_file
* feat: file operations cfg. NOTE: we replaced the CFG in the command enable with TRUE because we are not sure how to handle this yet
* feat: git command conversion
* feat: google search
* feat: image generation
* feat: extract cfg from browser commands
* feat: remove cfg from execute code commands
* fix: file operation related tests
* fix: linting
* fix: tests for read_audio
* fix: test error
* feat: update cassettes
* fix: linting
* fix: test typechecking
* fix: google_search errors if unexpected kw arg is passed
* fix: pass config param to google search test
* fix: agent commands were broken + cassettes
* fix: agent test
* feat: cassettes
* feat: enable/disable logic for commands
* fix: some commands threw errors
* feat: fix tests
* Add new cassettes
* Add new cassettes
* ci: trigger ci
* Update autogpt/commands/execute_code.py
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
* fix prompt
* fix prompt + rebase
* add config, remove useless imports
* put back CFG just for download file
* lint
* The signature should be mandatory in the decorator
* black isort
* fix: remove the CFG
* fix: non-typed arg
* lint: type some args
* lint: add types for libraries
* Add new cassettes
* fix: windows compatibility
* fix: add config access to decorator
* fix: remove twitter mention
* DDGS search works at 3.0.2 version
* ci: linting

---------

Co-authored-by: Auto-GPT-Bot <github-bot@agpt.co>
Co-authored-by: merwanehamadi <merwanehamadi@gmail.com>
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
Co-authored-by: kinance <kinance@gmail.com>
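The whole PR follows one mechanical pattern: delete the module-level CFG = Config() singleton and thread an explicit config parameter from the agent loop down into every command. A minimal runnable sketch of the before and after, with illustrative names rather than the real Auto-GPT types:

from dataclasses import dataclass


@dataclass
class Config:
    speak_mode: bool = False
    workspace_path: str = "./workspace"


# Before: every module created its own global at import time.
# CFG = Config()
# def read_file(filename: str) -> str: ...   # silently reads CFG

# After: the caller passes the active Config explicitly.
def read_file(filename: str, config: Config) -> str:
    return f"would read {filename} from {config.workspace_path}"


config = Config(workspace_path="/tmp/agent")
print(read_file("notes.txt", config))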
@@ -30,7 +30,6 @@
 ## autogpt.commands.google_search
 ## autogpt.commands.image_gen
 ## autogpt.commands.improve_code
-## autogpt.commands.twitter
 ## autogpt.commands.web_selenium
 ## autogpt.commands.write_tests
 ## autogpt.app

@@ -274,6 +274,7 @@ class Agent:
                 command_name,
                 arguments,
                 self.config.prompt_generator,
+                config=cfg,
             )
             result = f"Command {command_name} returned: " f"{command_result}"

@@ -6,15 +6,11 @@ from autogpt.agent.agent_manager import AgentManager
 from autogpt.commands.command import CommandRegistry, command
 from autogpt.commands.web_requests import scrape_links, scrape_text
 from autogpt.config import Config
-from autogpt.logs import logger
-from autogpt.memory.vector import get_memory
 from autogpt.processing.text import summarize_text
 from autogpt.prompts.generator import PromptGenerator
 from autogpt.speech import say_text
 from autogpt.url_utils.validators import validate_url

-CFG = Config()
-

 def is_valid_int(value: str) -> bool:
     """Check if the value is a valid integer

@@ -93,6 +89,7 @@ def execute_command(
     command_name: str,
     arguments,
     prompt: PromptGenerator,
+    config: Config,
 ):
     """Execute the command and return the result

@@ -108,7 +105,7 @@ def execute_command(

         # If the command is found, call it with the provided arguments
         if cmd:
-            return cmd(**arguments)
+            return cmd(**arguments, config=config)

         # TODO: Remove commands below after they are moved to the command registry.
         command_name = map_command_synonyms(command_name.lower())
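With that signature change, execute_command forwards the active config as a keyword argument next to the model-supplied arguments. A reduced sketch of the dispatch (registry simplified to a plain dict; names are illustrative):

class Config:
    user_agent = "Auto-GPT"


def scrape_links(url: str, config: Config) -> str:
    return f"scraping {url} as {config.user_agent}"


registry = {"scrape_links": scrape_links}


def execute_command(command_name: str, arguments: dict, config: Config):
    cmd = registry.get(command_name)
    if cmd:
        # config travels as a keyword argument alongside the model-supplied args
        return cmd(**arguments, config=config)
    return f"Unknown command '{command_name}'"


print(execute_command("scrape_links", {"url": "https://example.com"}, Config()))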
@@ -135,7 +132,7 @@ def execute_command(
     "get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
 )
 @validate_url
-def get_text_summary(url: str, question: str) -> str:
+def get_text_summary(url: str, question: str, config: Config) -> str:
     """Get the text summary of a webpage

     Args:

@@ -153,7 +150,7 @@ def get_text_summary(url: str, question: str) -> str:

 @command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
 @validate_url
-def get_hyperlinks(url: str) -> Union[str, List[str]]:
+def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
     """Get all hyperlinks on a webpage

     Args:

@@ -162,7 +159,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
     Returns:
         str or list: The hyperlinks on the page
     """
-    return scrape_links(url)
+    return scrape_links(url, config)


 @command(

@@ -170,7 +167,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
     "Start GPT Agent",
     '"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
 )
-def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) -> str:
+def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str:
     """Start an agent with a given name, task, and prompt

     Args:

@@ -191,11 +188,11 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
     agent_intro = f"{voice_name} here, Reporting for duty!"

     # Create agent
-    if CFG.speak_mode:
+    if config.speak_mode:
         say_text(agent_intro, 1)
     key, ack = agent_manager.create_agent(task, first_message, model)

-    if CFG.speak_mode:
+    if config.speak_mode:
         say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

     # Assign task (prompt), get response

@@ -205,7 +202,7 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->


 @command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
-def message_agent(key: str, message: str) -> str:
+def message_agent(key: str, message: str, config: Config) -> str:
     """Message an agent with a given key and message"""
     # Check if the key is a valid integer
     if is_valid_int(key):

@@ -214,13 +211,13 @@ def message_agent(key: str, message: str) -> str:
         return "Invalid key, must be an integer."

     # Speak response
-    if CFG.speak_mode:
+    if config.speak_mode:
         say_text(agent_response, 1)
     return agent_response


-@command("list_agents", "List GPT Agents", "")
-def list_agents() -> str:
+@command("list_agents", "List GPT Agents", "() -> str")
+def list_agents(config: Config) -> str:
     """List all agents

     Returns:

@@ -232,7 +229,7 @@ def list_agents() -> str:


 @command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
-def delete_agent(key: str) -> str:
+def delete_agent(key: str, config: Config) -> str:
     """Delete an agent with a given key

     Args:

@@ -1,16 +1,21 @@
 """Code evaluation module."""
 from __future__ import annotations

+from typing import TYPE_CHECKING
+
 from autogpt.commands.command import command
 from autogpt.llm.utils import call_ai_function

+if TYPE_CHECKING:
+    from autogpt.config import Config
+

 @command(
     "analyze_code",
     "Analyze Code",
     '"code": "<full_code_string>"',
 )
-def analyze_code(code: str) -> list[str]:
+def analyze_code(code: str, config: Config) -> list[str]:
     """
     A function that takes in a string and returns a response from create chat
     completion api call.
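The if TYPE_CHECKING: guard used here and in the following files keeps Config visible to type checkers without importing it at runtime, which avoids circular imports; it works because these modules also use from __future__ import annotations, so annotations are never evaluated. A self-contained sketch (Decimal stands in for Config):

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # never imported at runtime; stands in for autogpt.config.Config
    from decimal import Decimal


def scale(value: Decimal, factor: int) -> Decimal:
    return value * factor


# With postponed evaluation, annotations stay strings at runtime:
print(scale.__annotations__)  # {'value': 'Decimal', 'factor': 'int', 'return': 'Decimal'}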
@@ -28,4 +33,4 @@ def analyze_code(code: str) -> list[str]:
         "Analyzes the given code and returns a list of suggestions for improvements."
     )

-    return call_ai_function(function_string, args, description_string)
+    return call_ai_function(function_string, args, description_string, config=config)

@@ -1,22 +1,25 @@
 """Commands for converting audio to text."""
 import json
+from typing import TYPE_CHECKING

 import requests

 from autogpt.commands.command import command
 from autogpt.config import Config

-CFG = Config()
+if TYPE_CHECKING:
+    from autogpt.config import Config


 @command(
     "read_audio_from_file",
     "Convert Audio to text",
     '"filename": "<filename>"',
-    CFG.huggingface_audio_to_text_model,
-    "Configure huggingface_audio_to_text_model.",
+    lambda config: config.huggingface_audio_to_text_model
+    and config.huggingface_api_token,
+    "Configure huggingface_audio_to_text_model and Hugging Face api token.",
 )
-def read_audio_from_file(filename: str) -> str:
+def read_audio_from_file(filename: str, config: Config) -> str:
     """
     Convert audio to text.

@@ -28,10 +31,10 @@ def read_audio_from_file(filename: str) -> str:
     """
     with open(filename, "rb") as audio_file:
         audio = audio_file.read()
-    return read_audio(audio)
+    return read_audio(audio, config)


-def read_audio(audio: bytes) -> str:
+def read_audio(audio: bytes, config: Config) -> str:
     """
     Convert audio to text.

@@ -41,9 +44,9 @@ def read_audio(audio: bytes) -> str:
     Returns:
         str: The text from the audio
     """
-    model = CFG.huggingface_audio_to_text_model
+    model = config.huggingface_audio_to_text_model
     api_url = f"https://api-inference.huggingface.co/models/{model}"
-    api_token = CFG.huggingface_api_token
+    api_token = config.huggingface_api_token
     headers = {"Authorization": f"Bearer {api_token}"}

     if api_token is None:
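For context, the Hugging Face inference call that read_audio performs now takes everything from the passed config. A hedged sketch of the request shape (the model name and the JSON "text" field are assumptions about the HF inference API, not taken from this diff):

import requests


class Config:
    huggingface_audio_to_text_model = "facebook/wav2vec2-base-960h"  # assumed default
    huggingface_api_token = None  # populated from the environment in real use


def read_audio(audio: bytes, config: Config) -> str:
    model = config.huggingface_audio_to_text_model
    api_url = f"https://api-inference.huggingface.co/models/{model}"
    if config.huggingface_api_token is None:
        return "Error: missing Hugging Face API token"
    headers = {"Authorization": f"Bearer {config.huggingface_api_token}"}
    # the endpoint accepts raw audio bytes and is expected to return JSON
    response = requests.post(api_url, headers=headers, data=audio)
    return response.json().get("text", "")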
@@ -1,8 +1,9 @@
 import functools
 import importlib
 import inspect
-from typing import Any, Callable, Optional
+from typing import TYPE_CHECKING, Any, Callable, Optional

+from autogpt.config import Config
 from autogpt.logs import logger

 # Unique identifier for auto-gpt commands

@@ -24,19 +25,23 @@ class Command:
         description: str,
         method: Callable[..., Any],
         signature: str = "",
-        enabled: bool = True,
+        enabled: bool | Callable[[Config], bool] = True,
         disabled_reason: Optional[str] = None,
     ):
         self.name = name
         self.description = description
         self.method = method
-        self.signature = signature if signature else str(inspect.signature(self.method))
+        self.signature = signature
         self.enabled = enabled
         self.disabled_reason = disabled_reason

     def __call__(self, *args, **kwargs) -> Any:
+        if hasattr(kwargs, "config") and callable(self.enabled):
+            self.enabled = self.enabled(kwargs["config"])
         if not self.enabled:
-            return f"Command '{self.name}' is disabled: {self.disabled_reason}"
+            if self.disabled_reason:
+                return f"Command '{self.name}' is disabled: {self.disabled_reason}"
+            return f"Command '{self.name}' is disabled"
         return self.method(*args, **kwargs)

     def __str__(self) -> str:
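Here enabled may be a plain bool or a predicate over Config, resolved lazily on the first call that carries a config. One caveat in the hunk above: hasattr(kwargs, "config") checks for an attribute on the dict object and is always False for a plain dict, so this per-call gate never fires and gating effectively happens at decorator time; the sketch below uses the membership test "config" in kwargs instead:

from typing import Any, Callable, Union


class Config:
    allow_downloads = False


class Command:
    def __init__(self, name, method, enabled=True, disabled_reason=None):
        self.name = name
        self.method = method
        self.enabled: Union[bool, Callable[[Any], bool]] = enabled
        self.disabled_reason = disabled_reason

    def __call__(self, *args, **kwargs) -> Any:
        # key test, rather than hasattr(kwargs, "config"), so the predicate runs
        if "config" in kwargs and callable(self.enabled):
            self.enabled = self.enabled(kwargs["config"])
        if not self.enabled:
            if self.disabled_reason:
                return f"Command '{self.name}' is disabled: {self.disabled_reason}"
            return f"Command '{self.name}' is disabled"
        return self.method(*args, **kwargs)


download = Command(
    "download_file",
    lambda url, config: f"downloading {url}",
    enabled=lambda config: config.allow_downloads,
    disabled_reason="downloads are disabled",
)
print(download("https://example.com/x.zip", config=Config()))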
@@ -133,12 +138,17 @@ class CommandRegistry:
 def command(
     name: str,
     description: str,
-    signature: str = "",
-    enabled: bool = True,
+    signature: str,
+    enabled: bool | Callable[[Config], bool] = True,
     disabled_reason: Optional[str] = None,
 ) -> Callable[..., Any]:
     """The command decorator is used to create Command objects from ordinary functions."""

+    # TODO: Remove this in favor of better command management
+    CFG = Config()
+
+    if callable(enabled):
+        enabled = enabled(CFG)
     if not enabled:
         if disabled_reason is not None:
             logger.debug(f"Command '{name}' is disabled: {disabled_reason}")
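At decorator level the gate is resolved eagerly: a fresh Config is built at import time (the TODO above) and a callable enabled is collapsed to a bool once. A runnable sketch of that shape with a simplified decorator:

class Config:
    execute_local_commands = False


def command(name, description, signature, enabled=True, disabled_reason=None):
    cfg = Config()  # the TODO in the diff: evaluated once, at import time
    if callable(enabled):
        enabled = enabled(cfg)

    def decorator(func):
        func.command_enabled = enabled
        func.command_disabled_reason = disabled_reason
        return func

    return decorator


@command(
    "execute_shell",
    "Execute Shell Command",
    '"command_line": "<command_line>"',
    enabled=lambda cfg: cfg.execute_local_commands,
    disabled_reason="EXECUTE_LOCAL_COMMANDS is not set",
)
def execute_shell(command_line: str, config: Config) -> str:
    return f"ran {command_line}"


print(execute_shell.command_enabled)  # False with the defaults above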
@@ -2,6 +2,7 @@
 import os
 import subprocess
 from pathlib import Path
+from typing import TYPE_CHECKING

 import docker
 from docker.errors import ImageNotFound

@@ -10,11 +11,9 @@ from autogpt.commands.command import command
 from autogpt.config import Config
 from autogpt.logs import logger

-CFG = Config()
-

 @command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
-def execute_python_file(filename: str) -> str:
+def execute_python_file(filename: str, config: Config) -> str:
     """Execute a Python file in a Docker container and return the output

     Args:

@@ -65,9 +64,9 @@ def execute_python_file(filename: str) -> str:
         logger.info(status)
         container = client.containers.run(
             image_name,
-            ["python", str(Path(filename).relative_to(CFG.workspace_path))],
+            ["python", str(Path(filename).relative_to(config.workspace_path))],
             volumes={
-                CFG.workspace_path: {
+                config.workspace_path: {
                     "bind": "/workspace",
                     "mode": "ro",
                 }

@@ -97,7 +96,7 @@ def execute_python_file(filename: str) -> str:
         return f"Error: {str(e)}"


-def validate_command(command: str) -> bool:
+def validate_command(command: str, config: Config) -> bool:
     """Validate a command to ensure it is allowed

     Args:

@@ -111,13 +110,13 @@ def validate_command(command: str) -> bool:
     if not tokens:
         return False

-    if CFG.deny_commands and tokens[0] not in CFG.deny_commands:
+    if config.deny_commands and tokens[0] not in config.deny_commands:
         return False

-    for keyword in CFG.allow_commands:
+    for keyword in config.allow_commands:
         if keyword in tokens:
             return True
-    if CFG.allow_commands:
+    if config.allow_commands:
         return False

     return True
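The allow/deny logic above, runnable in isolation (tokenization is not shown in the hunk; shlex.split is an assumption):

import shlex


class Config:
    deny_commands: list[str] = []
    allow_commands: list[str] = []


def validate_command(command: str, config: Config) -> bool:
    tokens = shlex.split(command)
    if not tokens:
        return False
    if config.deny_commands and tokens[0] not in config.deny_commands:
        return False
    for keyword in config.allow_commands:
        if keyword in tokens:
            return True
    if config.allow_commands:
        return False
    return True


cfg = Config()
cfg.allow_commands = ["ls", "cat"]
print(validate_command("ls -la", cfg))    # True: 'ls' is allow-listed
print(validate_command("rm -rf /", cfg))  # False: not on the allow list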
@@ -127,12 +126,12 @@ def validate_command(command: str) -> bool:
     "execute_shell",
     "Execute Shell Command, non-interactive commands only",
     '"command_line": "<command_line>"',
-    CFG.execute_local_commands,
+    lambda cfg: cfg.execute_local_commands,
     "You are not allowed to run local shell commands. To execute"
     " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
     "in your config file: .env - do not attempt to bypass the restriction.",
 )
-def execute_shell(command_line: str) -> str:
+def execute_shell(command_line: str, config: Config) -> str:
     """Execute a shell command and return the output

     Args:

@@ -141,14 +140,14 @@ def execute_shell(command_line: str) -> str:
     Returns:
         str: The output of the command
     """
-    if not validate_command(command_line):
+    if not validate_command(command_line, config):
         logger.info(f"Command '{command_line}' not allowed")
         return "Error: This Shell Command is not allowed."

     current_dir = Path.cwd()
     # Change dir into workspace if necessary
-    if not current_dir.is_relative_to(CFG.workspace_path):
-        os.chdir(CFG.workspace_path)
+    if not current_dir.is_relative_to(config.workspace_path):
+        os.chdir(config.workspace_path)

     logger.info(
         f"Executing command '{command_line}' in working directory '{os.getcwd()}'"

@@ -167,12 +166,12 @@ def execute_shell(command_line: str) -> str:
     "execute_shell_popen",
     "Execute Shell Command, non-interactive commands only",
     '"command_line": "<command_line>"',
-    CFG.execute_local_commands,
+    lambda config: config.execute_local_commands,
     "You are not allowed to run local shell commands. To execute"
     " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
     "in your config. Do not attempt to bypass the restriction.",
 )
-def execute_shell_popen(command_line) -> str:
+def execute_shell_popen(command_line, config: Config) -> str:
     """Execute a shell command with Popen and returns an english description
     of the event and the process id

@@ -182,14 +181,14 @@ def execute_shell_popen(command_line) -> str:
     Returns:
         str: Description of the fact that the process started and its id
     """
-    if not validate_command(command_line):
+    if not validate_command(command_line, config):
         logger.info(f"Command '{command_line}' not allowed")
         return "Error: This Shell Command is not allowed."

     current_dir = os.getcwd()
     # Change dir into workspace if necessary
-    if CFG.workspace_path not in current_dir:
-        os.chdir(CFG.workspace_path)
+    if config.workspace_path not in current_dir:
+        os.chdir(config.workspace_path)

     logger.info(
         f"Executing command '{command_line}' in working directory '{os.getcwd()}'"

@@ -4,7 +4,7 @@ from __future__ import annotations
 import hashlib
 import os
 import os.path
-from typing import Generator, Literal
+from typing import TYPE_CHECKING, Generator, Literal

 import charset_normalizer
 import requests

@@ -13,13 +13,14 @@ from requests.adapters import HTTPAdapter, Retry

 from autogpt.commands.command import command
 from autogpt.commands.file_operations_utils import read_textual_file
-from autogpt.config import Config
 from autogpt.logs import logger
 from autogpt.memory.vector import MemoryItem, VectorMemory
 from autogpt.spinner import Spinner
 from autogpt.utils import readable_file_size

-CFG = Config()
+if TYPE_CHECKING:
+    from autogpt.config import Config


 Operation = Literal["write", "append", "delete"]

@@ -60,7 +61,7 @@ def operations_from_log(
 def file_operations_state(log_path: str) -> dict[str, str]:
     """Iterates over the operations log and returns the expected state.

-    Parses a log file at CFG.file_logger_path to construct a dictionary that maps
+    Parses a log file at config.file_logger_path to construct a dictionary that maps
     each file path written or appended to its checksum. Deleted files are removed
     from the dictionary.

@@ -68,7 +69,7 @@ def file_operations_state(log_path: str) -> dict[str, str]:
         A dictionary mapping file paths to their checksums.

     Raises:
-        FileNotFoundError: If CFG.file_logger_path is not found.
+        FileNotFoundError: If config.file_logger_path is not found.
         ValueError: If the log file content is not in the expected format.
     """
     state = {}

@@ -81,7 +82,7 @@ def file_operations_state(log_path: str) -> dict[str, str]:


 def is_duplicate_operation(
-    operation: Operation, filename: str, checksum: str | None = None
+    operation: Operation, filename: str, config: Config, checksum: str | None = None
 ) -> bool:
     """Check if the operation has already been performed

@@ -93,7 +94,7 @@ def is_duplicate_operation(
     Returns:
         True if the operation has already been performed on the file
     """
-    state = file_operations_state(CFG.file_logger_path)
+    state = file_operations_state(config.file_logger_path)
     if operation == "delete" and filename not in state:
         return True
     if operation == "write" and state.get(filename) == checksum:
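is_duplicate_operation works by replaying the operations journal into a path-to-checksum map. A self-contained sketch of that replay, assuming the "<operation>: <path> #<checksum>" line format implied by log_operation below:

import hashlib


def text_checksum(text: str) -> str:
    return hashlib.md5(text.encode("utf-8")).hexdigest()


def file_operations_state(log_lines: list[str]) -> dict[str, str]:
    state: dict[str, str] = {}
    for line in log_lines:
        operation, _, tail = line.partition(": ")
        path, _, checksum = tail.partition(" #")
        if operation in ("write", "append"):
            state[path] = checksum
        elif operation == "delete":
            state.pop(path, None)
    return state


log = [
    f"write: notes.txt #{text_checksum('hello')}",
    "delete: notes.txt",
]
print(file_operations_state(log))  # {} since the delete removed the entry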
@@ -101,7 +102,9 @@ def is_duplicate_operation(
     return False


-def log_operation(operation: str, filename: str, checksum: str | None = None) -> None:
+def log_operation(
+    operation: str, filename: str, config: Config, checksum: str | None = None
+) -> None:
     """Log the file operation to the file_logger.txt

     Args:

@@ -113,7 +116,7 @@ def log_operation(operation: str, filename: str, checksum: str | None = None) ->
     if checksum is not None:
         log_entry += f" #{checksum}"
     logger.debug(f"Logging file operation: {log_entry}")
-    append_to_file(CFG.file_logger_path, f"{log_entry}\n", should_log=False)
+    append_to_file(config.file_logger_path, f"{log_entry}\n", config, should_log=False)


 def split_file(

@@ -149,7 +152,7 @@ def split_file(


 @command("read_file", "Read a file", '"filename": "<filename>"')
-def read_file(filename: str) -> str:
+def read_file(filename: str, config: Config) -> str:
     """Read a file and return the contents

     Args:

@@ -198,7 +201,7 @@ def ingest_file(


 @command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
-def write_to_file(filename: str, text: str) -> str:
+def write_to_file(filename: str, text: str, config: Config) -> str:
     """Write text to a file

     Args:

@@ -209,14 +212,14 @@ def write_to_file(filename: str, text: str) -> str:
         str: A message indicating success or failure
     """
     checksum = text_checksum(text)
-    if is_duplicate_operation("write", filename, checksum):
+    if is_duplicate_operation("write", filename, config, checksum):
         return "Error: File has already been updated."
     try:
         directory = os.path.dirname(filename)
         os.makedirs(directory, exist_ok=True)
         with open(filename, "w", encoding="utf-8") as f:
             f.write(text)
-        log_operation("write", filename, checksum)
+        log_operation("write", filename, config, checksum)
         return "File written to successfully."
     except Exception as err:
         return f"Error: {err}"

@@ -225,7 +228,9 @@ def write_to_file(filename: str, text: str) -> str:
 @command(
     "append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
 )
-def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
+def append_to_file(
+    filename: str, text: str, config: Config, should_log: bool = True
+) -> str:
     """Append text to a file

     Args:

@@ -245,7 +250,7 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str:
         if should_log:
             with open(filename, "r", encoding="utf-8") as f:
                 checksum = text_checksum(f.read())
-            log_operation("append", filename, checksum=checksum)
+            log_operation("append", filename, config, checksum=checksum)

         return "Text appended successfully."
     except Exception as err:

@@ -253,7 +258,7 @@ def append_to_file(filename: str, text: str, should_log: bool = True) -> str:


 @command("delete_file", "Delete file", '"filename": "<filename>"')
-def delete_file(filename: str) -> str:
+def delete_file(filename: str, config: Config) -> str:
     """Delete a file

     Args:

@@ -262,18 +267,18 @@ def delete_file(filename: str) -> str:
     Returns:
         str: A message indicating success or failure
     """
-    if is_duplicate_operation("delete", filename):
+    if is_duplicate_operation("delete", filename, config):
         return "Error: File has already been deleted."
     try:
         os.remove(filename)
-        log_operation("delete", filename)
+        log_operation("delete", filename, config)
         return "File deleted successfully."
     except Exception as err:
         return f"Error: {err}"


 @command("list_files", "List Files in Directory", '"directory": "<directory>"')
-def list_files(directory: str) -> list[str]:
+def list_files(directory: str, config: Config) -> list[str]:
     """lists files in a directory recursively

     Args:

@@ -289,7 +294,7 @@ def list_files(directory: str) -> list[str]:
             if file.startswith("."):
                 continue
             relative_path = os.path.relpath(
-                os.path.join(root, file), CFG.workspace_path
+                os.path.join(root, file), config.workspace_path
             )
             found_files.append(relative_path)

@@ -300,10 +305,10 @@ def list_files(directory: str) -> list[str]:
     "download_file",
     "Download File",
     '"url": "<url>", "filename": "<filename>"',
-    CFG.allow_downloads,
+    lambda config: config.allow_downloads,
     "Error: You do not have user authorization to download files locally.",
 )
-def download_file(url, filename):
+def download_file(url, filename, config: Config):
     """Downloads a file
     Args:
         url (str): URL of the file to download

@@ -14,13 +14,13 @@ from autogpt.logs import logger


 class ParserStrategy:
-    def read(self, file_path: str):
+    def read(self, file_path: str) -> str:
         raise NotImplementedError


 # Basic text file reading
 class TXTParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         charset_match = charset_normalizer.from_path(file_path).best()
         logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'")
         return str(charset_match)
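The parser classes in this file are a classic strategy pattern: one small object per file type, all sharing the read(file_path) interface. A reduced sketch with a selection table (the mapping shown is illustrative, not the module's full table):

import json


class ParserStrategy:
    def read(self, file_path: str) -> str:
        raise NotImplementedError


class TXTParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r", encoding="utf-8") as f:
            return f.read()


class JSONParser(ParserStrategy):
    def read(self, file_path: str) -> str:
        with open(file_path, "r") as f:
            return str(json.load(f))


# extension -> strategy; the caller picks a parser and calls .read()
PARSERS: dict[str, ParserStrategy] = {
    ".txt": TXTParser(),
    ".json": JSONParser(),
}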
@@ -28,7 +28,7 @@ class TXTParser(ParserStrategy):

 # Reading text from binary file using pdf parser
 class PDFParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         parser = PyPDF2.PdfReader(file_path)
         text = ""
         for page_idx in range(len(parser.pages)):

@@ -38,7 +38,7 @@ class PDFParser(ParserStrategy):

 # Reading text from binary file using docs parser
 class DOCXParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         doc_file = docx.Document(file_path)
         text = ""
         for para in doc_file.paragraphs:

@@ -48,7 +48,7 @@ class DOCXParser(ParserStrategy):

 # Reading as dictionary and returning string format
 class JSONParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         with open(file_path, "r") as f:
             data = json.load(f)
             text = str(data)

@@ -56,7 +56,7 @@ class JSONParser(ParserStrategy):


 class XMLParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         with open(file_path, "r") as f:
             soup = BeautifulSoup(f, "xml")
             text = soup.get_text()

@@ -65,7 +65,7 @@ class XMLParser(ParserStrategy):

 # Reading as dictionary and returning string format
 class YAMLParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         with open(file_path, "r") as f:
             data = yaml.load(f, Loader=yaml.FullLoader)
             text = str(data)

@@ -73,7 +73,7 @@ class YAMLParser(ParserStrategy):


 class HTMLParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         with open(file_path, "r") as f:
             soup = BeautifulSoup(f, "html.parser")
             text = soup.get_text()

@@ -81,7 +81,7 @@ class HTMLParser(ParserStrategy):


 class MarkdownParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         with open(file_path, "r") as f:
             html = markdown.markdown(f.read())
             text = "".join(BeautifulSoup(html, "html.parser").findAll(string=True))

@@ -89,7 +89,7 @@ class MarkdownParser(ParserStrategy):


 class LaTeXParser(ParserStrategy):
-    def read(self, file_path):
+    def read(self, file_path: str) -> str:
         with open(file_path, "r") as f:
             latex = f.read()
             text = LatexNodes2Text().latex_to_text(latex)

@@ -101,11 +101,11 @@ class FileContext:
         self.parser = parser
         self.logger = logger

-    def set_parser(self, parser: ParserStrategy):
+    def set_parser(self, parser: ParserStrategy) -> None:
         self.logger.debug(f"Setting Context Parser to {parser}")
         self.parser = parser

-    def read_file(self, file_path):
+    def read_file(self, file_path) -> str:
         self.logger.debug(f"Reading file {file_path} with parser {self.parser}")
         return self.parser.read(file_path)

@@ -144,7 +144,7 @@ def is_file_binary_fn(file_path: str):
     return False


-def read_textual_file(file_path: str, logger: logs.Logger):
+def read_textual_file(file_path: str, logger: logs.Logger) -> str:
     if not os.path.isfile(file_path):
         raise FileNotFoundError(f"{file_path} not found!")
     is_binary = is_file_binary_fn(file_path)

@@ -1,22 +1,25 @@
 """Git operations for autogpt"""
+from typing import TYPE_CHECKING

 from git.repo import Repo

 from autogpt.commands.command import command
 from autogpt.config import Config
 from autogpt.url_utils.validators import validate_url

-CFG = Config()
+if TYPE_CHECKING:
+    from autogpt.config import Config


 @command(
     "clone_repository",
     "Clone Repository",
     '"url": "<repository_url>", "clone_path": "<clone_path>"',
-    CFG.github_username and CFG.github_api_key,
+    lambda config: config.github_username and config.github_api_key,
     "Configure github_username and github_api_key.",
 )
 @validate_url
-def clone_repository(url: str, clone_path: str) -> str:
+def clone_repository(url: str, clone_path: str, config: Config) -> str:
     """Clone a GitHub repository locally.

     Args:

@@ -27,7 +30,9 @@ def clone_repository(url: str, clone_path: str) -> str:
         str: The result of the clone operation.
     """
     split_url = url.split("//")
-    auth_repo_url = f"//{CFG.github_username}:{CFG.github_api_key}@".join(split_url)
+    auth_repo_url = f"//{config.github_username}:{config.github_api_key}@".join(
+        split_url
+    )
     try:
         Repo.clone_from(url=auth_repo_url, to_path=clone_path)
         return f"""Cloned {url} to {clone_path}"""
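The authenticated clone URL is built by splitting on "//" and re-joining with the credentials spliced in after the scheme. The same two lines, runnable:

def auth_clone_url(url: str, username: str, api_key: str) -> str:
    # "https://host/repo" -> ["https:", "host/repo"] -> "https://user:key@host/repo"
    split_url = url.split("//")
    return f"//{username}:{api_key}@".join(split_url)


print(auth_clone_url("https://github.com/org/repo.git", "user", "token"))
# https://user:token@github.com/org/repo.git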
@@ -3,17 +3,23 @@ from __future__ import annotations

 import json
 from itertools import islice
+from typing import TYPE_CHECKING

 from duckduckgo_search import DDGS

 from autogpt.commands.command import command
-from autogpt.config import Config

-CFG = Config()
+if TYPE_CHECKING:
+    from autogpt.config import Config


-@command("google", "Google Search", '"query": "<query>"', not CFG.google_api_key)
-def google_search(query: str, num_results: int = 8) -> str:
+@command(
+    "google",
+    "Google Search",
+    '"query": "<query>"',
+    lambda config: not config.google_api_key,
+)
+def google_search(query: str, config: Config, num_results: int = 8) -> str:
     """Return the results of a Google search

     Args:
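Per the commit message, DDGS-based search is pinned to duckduckgo-search 3.0.2. A plausible shape of the call (the .text() generator and its result fields are assumptions about that library version, not shown in this hunk):

import json
from itertools import islice

from duckduckgo_search import DDGS


def google_search(query: str, num_results: int = 8) -> str:
    if not query:
        return json.dumps([])
    results = DDGS().text(query)  # assumed: lazily yields result dicts
    return json.dumps(list(islice(results, num_results)), ensure_ascii=False)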
@@ -42,10 +48,12 @@ def google_search(query: str, num_results: int = 8) -> str:
     "google",
     "Google Search",
     '"query": "<query>"',
-    bool(CFG.google_api_key) and bool(CFG.custom_search_engine_id),
+    lambda config: bool(config.google_api_key) and bool(config.custom_search_engine_id),
     "Configure google_api_key and custom_search_engine_id.",
 )
-def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
+def google_official_search(
+    query: str, config: Config, num_results: int = 8
+) -> str | list[str]:
     """Return the results of a Google search using the official Google API

     Args:

@@ -61,8 +69,8 @@ def google_official_search(query: str, num_results: int = 8) -> str | list[str]:

     try:
         # Get the Google API key and Custom Search Engine ID from the config file
-        api_key = CFG.google_api_key
-        custom_search_engine_id = CFG.custom_search_engine_id
+        api_key = config.google_api_key
+        custom_search_engine_id = config.custom_search_engine_id

         # Initialize the Custom Search API service
         service = build("customsearch", "v1", developerKey=api_key)

@@ -4,6 +4,7 @@ import json
 import time
 import uuid
 from base64 import b64decode
+from typing import TYPE_CHECKING

 import openai
 import requests

@@ -13,11 +14,18 @@ from autogpt.commands.command import command
 from autogpt.config import Config
 from autogpt.logs import logger

-CFG = Config()
+if TYPE_CHECKING:
+    from autogpt.config import Config


-@command("generate_image", "Generate Image", '"prompt": "<prompt>"', CFG.image_provider)
-def generate_image(prompt: str, size: int = 256) -> str:
+@command(
+    "generate_image",
+    "Generate Image",
+    '"prompt": "<prompt>"',
+    lambda config: config.image_provider,
+    "Requires a image provider to be set.",
+)
+def generate_image(prompt: str, config: Config, size: int = 256) -> str:
     """Generate an image from a prompt.

     Args:

@@ -27,21 +35,21 @@ def generate_image(prompt: str, size: int = 256) -> str:
     Returns:
         str: The filename of the image
     """
-    filename = f"{CFG.workspace_path}/{str(uuid.uuid4())}.jpg"
+    filename = f"{config.workspace_path}/{str(uuid.uuid4())}.jpg"

     # DALL-E
-    if CFG.image_provider == "dalle":
-        return generate_image_with_dalle(prompt, filename, size)
+    if config.image_provider == "dalle":
+        return generate_image_with_dalle(prompt, filename, size, config)
     # HuggingFace
-    elif CFG.image_provider == "huggingface":
-        return generate_image_with_hf(prompt, filename)
+    elif config.image_provider == "huggingface":
+        return generate_image_with_hf(prompt, filename, config)
     # SD WebUI
-    elif CFG.image_provider == "sdwebui":
-        return generate_image_with_sd_webui(prompt, filename, size)
+    elif config.image_provider == "sdwebui":
+        return generate_image_with_sd_webui(prompt, filename, config, size)
     return "No Image Provider Set"


-def generate_image_with_hf(prompt: str, filename: str) -> str:
+def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
     """Generate an image with HuggingFace's API.

     Args:

@@ -52,14 +60,14 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
         str: The filename of the image
     """
     API_URL = (
-        f"https://api-inference.huggingface.co/models/{CFG.huggingface_image_model}"
+        f"https://api-inference.huggingface.co/models/{config.huggingface_image_model}"
     )
-    if CFG.huggingface_api_token is None:
+    if config.huggingface_api_token is None:
         raise ValueError(
             "You need to set your Hugging Face API token in the config file."
         )
     headers = {
-        "Authorization": f"Bearer {CFG.huggingface_api_token}",
+        "Authorization": f"Bearer {config.huggingface_api_token}",
         "X-Use-Cache": "false",
     }

@@ -101,7 +109,9 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
     return f"Error creating image."


-def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
+def generate_image_with_dalle(
+    prompt: str, filename: str, size: int, config: Config
+) -> str:
     """Generate an image with DALL-E.

     Args:

@@ -126,7 +136,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
         n=1,
         size=f"{size}x{size}",
         response_format="b64_json",
-        api_key=CFG.openai_api_key,
+        api_key=config.openai_api_key,
     )

     logger.info(f"Image Generated for prompt:{prompt}")

@@ -142,6 +152,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
 def generate_image_with_sd_webui(
     prompt: str,
     filename: str,
+    config: Config,
     size: int = 512,
     negative_prompt: str = "",
     extra: dict = {},

@@ -158,13 +169,13 @@ def generate_image_with_sd_webui(
     """
     # Create a session and set the basic auth if needed
     s = requests.Session()
-    if CFG.sd_webui_auth:
-        username, password = CFG.sd_webui_auth.split(":")
+    if config.sd_webui_auth:
+        username, password = config.sd_webui_auth.split(":")
         s.auth = (username, password or "")

     # Generate the images
     response = requests.post(
-        f"{CFG.sd_webui_url}/sdapi/v1/txt2img",
+        f"{config.sd_webui_url}/sdapi/v1/txt2img",
         json={
             "prompt": prompt,
             "negative_prompt": negative_prompt,

@@ -1,17 +1,21 @@
 from __future__ import annotations

 import json
+from typing import TYPE_CHECKING

 from autogpt.commands.command import command
 from autogpt.llm.utils import call_ai_function

+if TYPE_CHECKING:
+    from autogpt.config import Config
+

 @command(
     "improve_code",
     "Get Improved Code",
     '"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
 )
-def improve_code(suggestions: list[str], code: str) -> str:
+def improve_code(suggestions: list[str], code: str, config: Config) -> str:
     """
     A function that takes in code and suggestions and returns a response from create
     chat completion api call.

@@ -32,4 +36,4 @@ def improve_code(suggestions: list[str], code: str) -> str:
         " provided, making no other changes."
     )

-    return call_ai_function(function_string, args, description_string)
+    return call_ai_function(function_string, args, description_string, config=config)

@@ -1,18 +1,21 @@
 """Task Statuses module."""
 from __future__ import annotations

-from typing import NoReturn
+from typing import TYPE_CHECKING, NoReturn

 from autogpt.commands.command import command
 from autogpt.logs import logger

+if TYPE_CHECKING:
+    from autogpt.config import Config
+

 @command(
     "task_complete",
     "Task Complete (Shutdown)",
     '"reason": "<reason>"',
 )
-def task_complete(reason: str) -> NoReturn:
+def task_complete(reason: str, config: Config) -> NoReturn:
     """
     A function that takes in a string and exits the program

@@ -9,15 +9,12 @@ from autogpt.config import Config
 from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
 from autogpt.url_utils.validators import validate_url

-CFG = Config()
-
 session = requests.Session()
-session.headers.update({"User-Agent": CFG.user_agent})


 @validate_url
 def get_response(
-    url: str, timeout: int = 10
+    url: str, config: Config, timeout: int = 10
 ) -> tuple[None, str] | tuple[Response, None]:
     """Get the response from a URL

@@ -33,6 +30,7 @@ def get_response(
         requests.exceptions.RequestException: If the HTTP request fails
     """
     try:
+        session.headers.update({"User-Agent": config.user_agent})
         response = session.get(url, timeout=timeout)

         # Check if the response contains an HTTP error
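Moving the header update inside get_response matters: the old module-level session.headers.update ran once at import time against a global CFG, while this version stamps the User-Agent from whichever config accompanies each call. Reduced sketch:

import requests


class Config:
    user_agent = "Auto-GPT test agent"


session = requests.Session()


def get_response(url: str, config: Config, timeout: int = 10):
    # per-call configuration instead of import-time configuration
    session.headers.update({"User-Agent": config.user_agent})
    return session.get(url, timeout=timeout)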
@@ -50,7 +48,7 @@ def get_response(
|
|||||||
return None, f"Error: {str(re)}"
|
return None, f"Error: {str(re)}"
|
||||||
|
|
||||||
|
|
||||||
def scrape_text(url: str) -> str:
|
def scrape_text(url: str, config: Config) -> str:
|
||||||
"""Scrape text from a webpage
|
"""Scrape text from a webpage
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -59,7 +57,7 @@ def scrape_text(url: str) -> str:
|
|||||||
Returns:
|
Returns:
|
||||||
str: The scraped text
|
str: The scraped text
|
||||||
"""
|
"""
|
||||||
response, error_message = get_response(url)
|
response, error_message = get_response(url, config)
|
||||||
if error_message:
|
if error_message:
|
||||||
return error_message
|
return error_message
|
||||||
if not response:
|
if not response:
|
||||||
@@ -78,7 +76,7 @@ def scrape_text(url: str) -> str:
|
|||||||
return text
|
return text
|
||||||
|
|
||||||
|
|
||||||
def scrape_links(url: str) -> str | list[str]:
|
def scrape_links(url: str, config: Config) -> str | list[str]:
|
||||||
"""Scrape links from a webpage
|
"""Scrape links from a webpage
|
||||||
|
|
||||||
Args:
|
Args:
|
||||||
@@ -87,7 +85,7 @@ def scrape_links(url: str) -> str | list[str]:
|
|||||||
Returns:
|
Returns:
|
||||||
str | list[str]: The scraped links
|
str | list[str]: The scraped links
|
||||||
"""
|
"""
|
||||||
response, error_message = get_response(url)
|
response, error_message = get_response(url, config)
|
||||||
if error_message:
|
if error_message:
|
||||||
return error_message
|
return error_message
|
||||||
if not response:
|
if not response:
|
||||||
|
|||||||
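One behavioural detail worth noting: the User-Agent header is no longer set once at import time from a global CFG; it is refreshed from the passed config on every get_response call, so the header always tracks the Config actually in use. Sketched usage (the URL is a placeholder; the import path follows the upstream tree):

from autogpt.commands.web_requests import scrape_links, scrape_text
from autogpt.config import Config

config = Config()
text = scrape_text("https://example.com", config)   # request sends config.user_agent
links = scrape_links("https://example.com", config)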
@@ -4,7 +4,7 @@ from __future__ import annotations
import logging
from pathlib import Path
from sys import platform
-from typing import Optional, Type
+from typing import TYPE_CHECKING, Optional, Type

from bs4 import BeautifulSoup
from selenium.common.exceptions import WebDriverException
@@ -28,17 +28,17 @@ from webdriver_manager.firefox import GeckoDriverManager
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager

from autogpt.commands.command import command
-from autogpt.config import Config
from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, NoMemory, get_memory
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
-from autogpt.processing.text import summarize_text
from autogpt.url_utils.validators import validate_url

+if TYPE_CHECKING:
+    from autogpt.config import Config

BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions

FILE_DIR = Path(__file__).parent.parent
-CFG = Config()


@command(
@@ -47,7 +47,7 @@ CFG = Config()
    '"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
)
@validate_url
-def browse_website(url: str, question: str) -> str:
+def browse_website(url: str, question: str, config: Config) -> str:
    """Browse a website and return the answer and links to the user

    Args:
@@ -58,7 +58,7 @@ def browse_website(url: str, question: str) -> str:
        Tuple[str, WebDriver]: The answer and links to the user and the webdriver
    """
    try:
-        driver, text = scrape_text_with_selenium(url)
+        driver, text = scrape_text_with_selenium(url, config)
    except WebDriverException as e:
        # These errors are often quite long and include lots of context.
        # Just grab the first line.
@@ -66,7 +66,7 @@ def browse_website(url: str, question: str) -> str:
        return f"Error: {msg}"

    add_header(driver)
-    summary = summarize_memorize_webpage(url, text, question, driver)
+    summary = summarize_memorize_webpage(url, text, question, config, driver)
    links = scrape_links_with_selenium(driver, url)

    # Limit links to 5
@@ -76,7 +76,7 @@ def browse_website(url: str, question: str) -> str:
    return f"Answer gathered from website: {summary}\n\nLinks: {links}"


-def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
+def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]:
    """Scrape text from a website using selenium

    Args:
@@ -94,23 +94,23 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
        "safari": SafariOptions,
    }

-    options: BrowserOptions = options_available[CFG.selenium_web_browser]()
+    options: BrowserOptions = options_available[config.selenium_web_browser]()
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
    )

-    if CFG.selenium_web_browser == "firefox":
-        if CFG.selenium_headless:
+    if config.selenium_web_browser == "firefox":
+        if config.selenium_headless:
            options.headless = True
            options.add_argument("--disable-gpu")
        driver = FirefoxDriver(
            service=GeckoDriverService(GeckoDriverManager().install()), options=options
        )
-    elif CFG.selenium_web_browser == "edge":
+    elif config.selenium_web_browser == "edge":
        driver = EdgeDriver(
            service=EdgeDriverService(EdgeDriverManager().install()), options=options
        )
-    elif CFG.selenium_web_browser == "safari":
+    elif config.selenium_web_browser == "safari":
        # Requires a bit more setup on the users end
        # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
        driver = SafariDriver(options=options)
@@ -120,7 +120,7 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
        options.add_argument("--remote-debugging-port=9222")

        options.add_argument("--no-sandbox")
-        if CFG.selenium_headless:
+        if config.selenium_headless:
            options.add_argument("--headless=new")
            options.add_argument("--disable-gpu")

@@ -202,7 +202,11 @@ def add_header(driver: WebDriver) -> None:


def summarize_memorize_webpage(
-    url: str, text: str, question: str, driver: Optional[WebDriver] = None
+    url: str,
+    text: str,
+    question: str,
+    config: Config,
+    driver: Optional[WebDriver] = None,
) -> str:
    """Summarize text using the OpenAI API

@@ -221,7 +225,7 @@ def summarize_memorize_webpage(
    text_length = len(text)
    logger.info(f"Text length: {text_length} characters")

-    memory = get_memory(CFG)
+    memory = get_memory(config)

    new_memory = MemoryItem.from_webpage(text, url, question=question)
    memory.add(new_memory)
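browse_website and its helpers now pick the Selenium driver and headless mode from the config argument rather than the old module-level CFG. A usage sketch, with the URL, question and attribute values purely illustrative:

from autogpt.commands.web_selenium import browse_website
from autogpt.config import Config

config = Config()
config.selenium_web_browser = "firefox"  # chrome, firefox, edge or safari
config.selenium_headless = True
answer = browse_website("https://example.com", "What is this page about?", config)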
@@ -2,17 +2,21 @@
from __future__ import annotations

import json
+from typing import TYPE_CHECKING

from autogpt.commands.command import command
from autogpt.llm.utils import call_ai_function

+if TYPE_CHECKING:
+    from autogpt.config import Config


@command(
    "write_tests",
    "Write Tests",
    '"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
)
-def write_tests(code: str, focus: list[str]) -> str:
+def write_tests(code: str, focus: list[str], config: Config) -> str:
    """
    A function that takes in code and focus topics and returns a response from create
    chat completion api call.
@@ -34,4 +38,4 @@ def write_tests(code: str, focus: list[str]) -> str:
        " specific areas if required."
    )

-    return call_ai_function(function_string, args, description_string)
+    return call_ai_function(function_string, args, description_string, config=config)
@@ -7,13 +7,14 @@ from __future__ import annotations
import os
import platform
from pathlib import Path
-from typing import Optional
+from typing import TYPE_CHECKING, Optional

import distro
import yaml

-from autogpt.commands.command import CommandRegistry
-from autogpt.prompts.generator import PromptGenerator
+if TYPE_CHECKING:
+    from autogpt.commands.command import CommandRegistry
+    from autogpt.prompts.generator import PromptGenerator

# Soon this will go in a folder where it remembers more stuff about the run(s)
SAVE_FILE = str(Path(os.getcwd()) / "ai_settings.yaml")
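The recurring if TYPE_CHECKING: blocks are what let Config and friends be removed from runtime imports without breaking annotations: combined with from __future__ import annotations, the annotation stays a string at runtime, so the guarded import is only ever executed by the type checker and cannot create an import cycle. A minimal sketch of the idiom:

from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Seen by mypy/pyright only; never executed at runtime.
    from autogpt.config import Config


def settings_path(config: Config) -> str:
    # "Config" in the signature above is left unevaluated at runtime.
    return config.ai_settings_file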
@@ -1,19 +1,22 @@
"""Configurator module."""
from __future__ import annotations

+from typing import TYPE_CHECKING

import click
from colorama import Back, Fore, Style

from autogpt import utils
-from autogpt.config import Config
from autogpt.llm.utils import check_model
from autogpt.logs import logger
from autogpt.memory.vector import get_supported_memory_backends

-CFG = Config()
+if TYPE_CHECKING:
+    from autogpt.config import Config


def create_config(
+    config: Config,
    continuous: bool,
    continuous_limit: int,
    ai_settings_file: str,
@@ -45,15 +48,15 @@ def create_config(
        allow_downloads (bool): Whether to allow Auto-GPT to download files natively
        skips_news (bool): Whether to suppress the output of latest news on startup
    """
-    CFG.set_debug_mode(False)
-    CFG.set_continuous_mode(False)
-    CFG.set_speak_mode(False)
-    CFG.set_fast_llm_model(check_model(CFG.fast_llm_model, "fast_llm_model"))
-    CFG.set_smart_llm_model(check_model(CFG.smart_llm_model, "smart_llm_model"))
+    config.set_debug_mode(False)
+    config.set_continuous_mode(False)
+    config.set_speak_mode(False)
+    config.set_fast_llm_model(check_model(config.fast_llm_model, "fast_llm_model"))
+    config.set_smart_llm_model(check_model(config.smart_llm_model, "smart_llm_model"))

    if debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_debug_mode(True)
+        config.set_debug_mode(True)

    if continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
@@ -64,13 +67,13 @@ def create_config(
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
-        CFG.set_continuous_mode(True)
+        config.set_continuous_mode(True)

        if continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ", Fore.GREEN, f"{continuous_limit}"
            )
-            CFG.set_continuous_limit(continuous_limit)
+            config.set_continuous_limit(continuous_limit)

    # Check if continuous limit is used without continuous mode
    if continuous_limit and not continuous:
@@ -78,15 +81,15 @@ def create_config(

    if speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_speak_mode(True)
+        config.set_speak_mode(True)

    if gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_smart_llm_model(CFG.fast_llm_model)
+        config.set_smart_llm_model(config.fast_llm_model)

    if gpt4only:
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
-        CFG.set_fast_llm_model(CFG.smart_llm_model)
+        config.set_fast_llm_model(config.smart_llm_model)

    if memory_type:
        supported_memory = get_supported_memory_backends()
@@ -97,13 +100,13 @@ def create_config(
                Fore.RED,
                f"{supported_memory}",
            )
-            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, CFG.memory_backend)
+            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, config.memory_backend)
        else:
-            CFG.memory_backend = chosen
+            config.memory_backend = chosen

    if skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
-        CFG.skip_reprompt = True
+        config.skip_reprompt = True

    if ai_settings_file:
        file = ai_settings_file
@@ -116,8 +119,8 @@ def create_config(
            exit(1)

        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
-        CFG.ai_settings_file = file
-        CFG.skip_reprompt = True
+        config.ai_settings_file = file
+        config.skip_reprompt = True

    if prompt_settings_file:
        file = prompt_settings_file
@@ -130,10 +133,10 @@ def create_config(
            exit(1)

        logger.typewriter_log("Using Prompt Settings File:", Fore.GREEN, file)
-        CFG.prompt_settings_file = file
+        config.prompt_settings_file = file

    if browser_name:
-        CFG.selenium_web_browser = browser_name
+        config.selenium_web_browser = browser_name

    if allow_downloads:
        logger.typewriter_log("Native Downloading:", Fore.GREEN, "ENABLED")
@@ -148,7 +151,7 @@ def create_config(
            Fore.YELLOW,
            f"{Back.RED + Style.BRIGHT}ALWAYS REMEMBER TO NEVER OPEN FILES YOU AREN'T SURE OF!{Style.RESET_ALL}",
        )
-        CFG.allow_downloads = True
+        config.allow_downloads = True

    if skip_news:
-        CFG.skip_news = True
+        config.skip_news = True
@@ -109,7 +109,11 @@ def retry_openai_api(


def call_ai_function(
-    function: str, args: list, description: str, model: str | None = None
+    function: str,
+    args: list,
+    description: str,
+    model: str | None = None,
+    config: Config = None,
) -> str:
    """Call an AI function

@@ -125,9 +129,8 @@ def call_ai_function(
    Returns:
        str: The response from the function
    """
-    cfg = Config()
    if model is None:
-        model = cfg.smart_llm_model
+        model = config.smart_llm_model
    # For each arg, if any are None, convert to "None":
    args = [str(arg) if arg is not None else "None" for arg in args]
    # parse args to comma separated string
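call_ai_function previously constructed its own Config(); after this hunk the caller's config decides the model, falling back to config.smart_llm_model when no model is given. A sketched call — the function string, args and description are illustrative:

from autogpt.config import Config
from autogpt.llm.utils import call_ai_function

config = Config()
result = call_ai_function(
    function="def reverse(s: str) -> str:",
    args=["'hello'"],
    description="Reverses a string.",
    config=config,  # model defaults to config.smart_llm_model
)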
@@ -49,6 +49,7 @@ def run_auto_gpt(
    check_openai_api_key()

    create_config(
+        cfg,
        continuous,
        continuous_limit,
        ai_settings,
@@ -40,6 +40,10 @@ auto-gpt-plugin-template @ git+https://github.com/Significant-Gravitas/Auto-GPT-
mkdocs
pymdown-extensions
mypy
+types-Markdown
+types-beautifulsoup4
+types-colorama
+types-Pillow

# OpenAI and Generic plugins import
openapi-python-client==0.13.4
@@ -1098,6 +1098,399 @@ interactions:
    status:
      code: 200
      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are write_to_file-GPT, an AI designed to use the write_to_file command to write
+      ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete
+      command to complete the task.\nYour decisions must always be made independently
+      without seeking user assistance. Play to your strengths as an LLM and pursue
+      simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file
+      command to write ''Hello World'' into a file named \"hello_world.txt\".\n2.
+      Use the task_complete command to complete the task.\n3. Do not use any other
+      commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your
+      short term memory is short, so immediately save important information to files.\n2.
+      If you are unsure how you previously did something or want to recall past events,
+      thinking about similar events will help you remember.\n3. No user assistance\n4.
+      Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      download_file: Download File, args: \"url\": \"<url>\", \"filename\": \"<filename>\"\n4.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n5.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n6. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n7.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n8. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n9. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n10. list_agents: List
+      GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n12. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n13.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\":
+      \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\":
+      \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\":
+      \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "user", "content": "Determine which next command to use, and respond using the
+      format specified above:"}], "temperature": 0, "max_tokens": 0}'
+    headers:
+      AGENT-MODE:
+      - AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
+      AGENT-TYPE:
+      - Auto-GPT-2023-X-TYPE
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '3481'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: "{\n \"error\": {\n \"message\": \"That model is currently overloaded
+        with other requests. You can retry your request, or contact us through our
+        help center at help.openai.com if the error persists. (Please include the
+        request ID 1d08452a41f33bf54874f93bf2a716de in your message.)\",\n \"type\":
+        \"server_error\",\n \"param\": null,\n \"code\": null\n }\n}\n"
+    headers:
+      Access-Control-Allow-Origin:
+      - '*'
+      Alt-Svc:
+      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+      Cf-Cache-Status:
+      - DYNAMIC
+      Cf-Ray:
+      - 7cca53eb28d702ac-ORD
+      Content-Length:
+      - '349'
+      Content-Type:
+      - application/json
+      Date:
+      - Thu, 25 May 2023 02:25:56 GMT
+      Function-Execution-Id:
+      - rvr2i8vhq9vo
+      Openai-Model:
+      - gpt-3.5-turbo-0301
+      Openai-Organization:
+      - significant-gravitas
+      Openai-Processing-Ms:
+      - '30015'
+      Openai-Version:
+      - '2020-10-01'
+      Server:
+      - Google Frontend
+      Strict-Transport-Security:
+      - max-age=15724800; includeSubDomains
+      X-Cloud-Trace-Context:
+      - 69c7b246f231d9cbd0c2974c332ebc51;o=1
+      X-Powered-By:
+      - Express
+      X-Ratelimit-Limit-Requests:
+      - '3500'
+      X-Ratelimit-Limit-Tokens:
+      - '90000'
+      X-Ratelimit-Remaining-Requests:
+      - '3499'
+      X-Ratelimit-Remaining-Tokens:
+      - '86499'
+      X-Ratelimit-Reset-Requests:
+      - 17ms
+      X-Ratelimit-Reset-Tokens:
+      - 2.334s
+      X-Request-Id:
+      - 1d08452a41f33bf54874f93bf2a716de
+    status:
+      code: 429
+      message: Too Many Requests
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are write_to_file-GPT, an AI designed to use the write_to_file command to write
+      ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete
+      command to complete the task.\nYour decisions must always be made independently
+      without seeking user assistance. Play to your strengths as an LLM and pursue
+      simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file
+      command to write ''Hello World'' into a file named \"hello_world.txt\".\n2.
+      Use the task_complete command to complete the task.\n3. Do not use any other
+      commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your
+      short term memory is short, so immediately save important information to files.\n2.
+      If you are unsure how you previously did something or want to recall past events,
+      thinking about similar events will help you remember.\n3. No user assistance\n4.
+      Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      download_file: Download File, args: \"url\": \"<url>\", \"filename\": \"<filename>\"\n4.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n5.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n6. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n7.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n8. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n9. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n10. list_agents: List
+      GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n12. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n13.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\":
+      \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\":
+      \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\":
+      \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "user", "content": "Determine which next command to use, and respond using the
+      format specified above:"}], "temperature": 0, "max_tokens": 0}'
+    headers:
+      AGENT-MODE:
+      - AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
+      AGENT-TYPE:
+      - Auto-GPT-2023-X-TYPE
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '3481'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA7yTTW/TQBCG7/yK0VxycaKE0DbxDQ6orTgAKioSRtbGHtvb2Dvu7pikRP7vlb+S
+        xqgnBNf5eN9ndmcOqGP0McqUREWZT69uq6fFJ/P+w+P3eWKf3P31l4+fv5pbfvhtEvSQNw8USd8x
+        i7gocxLNBj2MLCmhGP3F5erderW4uFx7WHBMOfqYljJdzi6mUtkNT+fL+QI9rJxKCf0DlpaLUkLh
+        LRmH/tVq7uFJ+xh/u1h6KCwqP4bW62XtYZSxjsih/+OABblB1nJO6KNyTjtRRhpINkKmGeAQGACA
+        ACXjKs3EBehDH+wTtJcmGOAN7HSeQ+UIJCPYWS0UCoeJzgkiLgplYhDuEjC5pjxnuGebxxPQRhgU
+        tKVGFRTDJGvy4a7Jz2Qvk1mA3ktjS8qx0Sbt3O8yAlFuC5YeK23JQUF/4wZ3r86gXTtgwU5AlaXl
+        0molp3zCFiRrqpTbjrHLXJmOeArf/tlTBeak3lCE/aKcqR9jQ9WYNbJadKRdMXywIWo7ybjKNn1K
+        4AYy9avTiNhaiqRla9CgNaK9wIYStgSV0yZ9feSxvytJbf/XcnXGtTdsfC/6x8I3Ch3TGcQIXdl0
+        fCtdYnibTmLEcSYyvq8XMw20LXEPHpgaaw8TbbTLwu460EcnXKKH2sS0R39e/6zfPAMAAP//AwBM
+        TWg3zwQAAA==
+    headers:
+      Access-Control-Allow-Origin:
+      - '*'
+      Alt-Svc:
+      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+      Cache-Control:
+      - no-cache, must-revalidate
+      Cf-Cache-Status:
+      - DYNAMIC
+      Cf-Ray:
+      - 7cca54f8fd85111e-ORD
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Thu, 25 May 2023 02:26:34 GMT
+      Function-Execution-Id:
+      - rvr2pyuh5pu6
+      Openai-Model:
+      - gpt-3.5-turbo-0301
+      Openai-Organization:
+      - significant-gravitas
+      Openai-Processing-Ms:
+      - '25106'
+      Openai-Version:
+      - '2020-10-01'
+      Server:
+      - Google Frontend
+      Strict-Transport-Security:
+      - max-age=15724800; includeSubDomains
+      Vary:
+      - Accept-Encoding
+      X-Cloud-Trace-Context:
+      - bb40fcb7f86d47a999346b879f520f53;o=1
+      X-Powered-By:
+      - Express
+      X-Ratelimit-Limit-Requests:
+      - '3500'
+      X-Ratelimit-Limit-Tokens:
+      - '90000'
+      X-Ratelimit-Remaining-Requests:
+      - '3499'
+      X-Ratelimit-Remaining-Tokens:
+      - '86499'
+      X-Ratelimit-Reset-Requests:
+      - 17ms
+      X-Ratelimit-Reset-Tokens:
+      - 2.334s
+      X-Request-Id:
+      - 03d492530fb90d6815d8d49a68ab49b4
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are write_to_file-GPT, an AI designed to use the write_to_file command to write
+      ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete
+      command to complete the task.\nYour decisions must always be made independently
+      without seeking user assistance. Play to your strengths as an LLM and pursue
+      simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file
+      command to write ''Hello World'' into a file named \"hello_world.txt\".\n2.
+      Use the task_complete command to complete the task.\n3. Do not use any other
+      commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your
+      short term memory is short, so immediately save important information to files.\n2.
+      If you are unsure how you previously did something or want to recall past events,
+      thinking about similar events will help you remember.\n3. No user assistance\n4.
+      Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      download_file: Download File, args: \"url\": \"<url>\", \"filename\": \"<filename>\"\n4.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n5.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n6. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n7.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n8. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n9. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n10. list_agents: List
+      GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n12. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n13.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\":
+      \"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\":
+      \"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\":
+      \"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "system", "content": "This reminds you of these events from your past: \nI was
+      created and nothing new has happened."}, {"role": "user", "content": "Determine
+      which next command to use, and respond using the format specified above:"},
+      {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\":
+      \"I will use the write_to_file command to write ''Hello World'' into a file
+      named ''hello_world.txt''.\",\n \"reasoning\": \"The task requires me
+      to write ''Hello World'' into a file named ''hello_world.txt''. The write_to_file
+      command is the most appropriate command for this task.\",\n \"plan\":
+      \"- Use the write_to_file command to write ''Hello World'' into a file named
+      ''hello_world.txt''.\\n- Use the task_complete command to complete the task.\",\n \"criticism\":
+      \"I need to ensure that I have the correct filename and text before using the
+      write_to_file command.\",\n \"speak\": \"I will use the write_to_file
+      command to write ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\":
+      {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\":
+      \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"},
+      {"role": "system", "content": "Command write_to_file returned: File written
+      to successfully."}, {"role": "user", "content": "Determine which next command
+      to use, and respond using the format specified above:"}], "temperature": 0,
+      "max_tokens": 0}'
+    headers:
+      AGENT-MODE:
+      - AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
+      AGENT-TYPE:
+      - Auto-GPT-2023-X-TYPE
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '4801'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA6SST4/aQAzF7/0Uli9cAoIu7EJurSrUrdpeSrWHpkLDxEmmzIzTGUewQnz3KhD+
+        lG5Pe/Wzf/PseTs0OaaoKyXa1bb/8Kl5XsznrtEf5t/e5++Kz266Wsyar5vHL2tMkFe/SEs3MdDs
+        akti2GOCOpASyjEd3U/Hs+loMntI0HFOFlMsa+nfDSZ9acKK+8O74QgTbKIqCdMd1oFdLUvhNfmI
+        6Wh4P0zwAr8Ik3GCwqLspfR2NN4nqCs2miKmP3boKJ7AgS1hiipGE0V5aW2yF/LtCrvMAwBkKBU3
+        ZSUxwxS6YifQVtpiho+wMdZCEwmkIhAV18vOIIFm55TPQRjOtVPXIMPkGhlIRfbGl0fuomuDQL8b
+        EyiCo5azCUYIeh/JWoYnDjbvgfHCoKAwlsArRzn0qlZfblp9IFvpwcFGRf4FI7D4r3ETD22Oo4Cq
+        68B1MOpKLziAVG3XCxvVVvnjMn34/ur76GDEaBPd6e6e6DBJPjahnVNyHm6Nn4krKjgQxKoRMb6E
+        nDf+lh5rUuvX/+gRuU9OAera/8lP+0vH1/7C35hSobyN3nVWuqC0654AOcRGa4qxaKx9Phs6mOq8
+        ZX6P+wQL402slkcSphiFa0zQ+Jy2mA73P/dv/gAAAP//AwBuG68bAwQAAA==
+    headers:
+      Access-Control-Allow-Origin:
+      - '*'
+      Alt-Svc:
+      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+      - h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
+      Cache-Control:
+      - no-cache, must-revalidate
+      Cf-Cache-Status:
+      - DYNAMIC
+      Cf-Ray:
+      - 7cca55aa8ee7e100-ORD
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Thu, 25 May 2023 02:26:53 GMT
+      Function-Execution-Id:
+      - rvr2qsuver9b
+      Openai-Model:
+      - gpt-3.5-turbo-0301
+      Openai-Organization:
+      - significant-gravitas
+      Openai-Processing-Ms:
+      - '15614'
+      Openai-Version:
+      - '2020-10-01'
+      Server:
+      - Google Frontend
+      Strict-Transport-Security:
+      - max-age=15724800; includeSubDomains
+      Vary:
+      - Accept-Encoding
+      X-Cloud-Trace-Context:
+      - 0b238e84f74b403e164b93d247259c70;o=1
+      X-Powered-By:
+      - Express
+      X-Ratelimit-Limit-Requests:
+      - '3500'
+      X-Ratelimit-Limit-Tokens:
+      - '90000'
+      X-Ratelimit-Remaining-Requests:
+      - '3499'
+      X-Ratelimit-Remaining-Tokens:
+      - '84695'
+      X-Ratelimit-Reset-Requests:
+      - 17ms
+      X-Ratelimit-Reset-Tokens:
+      - 3.536s
+      X-Request-Id:
+      - b1fb38bd412977a3cd85ec84fbdd44d0
+    status:
+      code: 200
+      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are write_to_file-GPT, an AI designed to use the write_to_file command to write
@@ -1105,9 +1498,9 @@ interactions:
      command to complete the task.\nYour decisions must always be made independently
      without seeking user assistance. Play to your strengths as an LLM and pursue
      simple strategies with no legal complications.\nThe OS you are running on is:
-      Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello
-      World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command
-      to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1.
+      Windows-10\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello World''
+      into a file named \"hello_world.txt\".\n2. Use the task_complete command to
+      complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1.
      ~4000 word limit for short term memory. Your short term memory is short, so
      immediately save important information to files.\n2. If you are unsure how you
      previously did something or want to recall past events, thinking about similar
@@ -1150,7 +1543,7 @@ interactions:
      Connection:
      - keep-alive
      Content-Length:
-      - '3441'
+      - '3433'
      Content-Type:
      - application/json
    method: POST
@@ -1158,21 +1551,20 @@ interactions:
  response:
    body:
      string: !!binary |
-        H4sIAAAAAAAAA6STTXPaMBCG7/0VO3vhYhg+Emh86y10uKWZTqfueIS8YIEsOdJ6Qsv4v3eEbSCk
-        5FCO2o9Xzyvt7lFlGKPMBcui1P3Z182PxXS1eLL0Mn588UL9eVr81s+8KXcjjNAuNyS57RhIW5Sa
-        WFmDEUpHginDeDT9fPcwmU0fJhEWNiONMa5L7k8G932u3NL2h5NhEKu8WBPGeyydLUpO2W7JeIxn
-        s/sIT9rH+PhuGiFbFvoYGg3HozpCmVulyWP8c48F+U7XWU0Yo/BeeRaGA6U1TCY42CcGACBBzm21
-        ztnXLbnnmkBSgVNwviQcLY6V1jCZ4GCfGACABDm31TpnT2AMbbBN0I5DMME5GKIM2MKrU0zQeySt
-        LXy3Tmc9UIYtCFgpTWBEQRn08pBPX0N+wDvugTAZcE4GWl8UTsDCb6HyyqwPx144p11FL9QWwmSD
-        BKNzLkfCW6PMuoH7FjoPXCnbNFAcO0EKA0uCyt9EP4MvKyYHnAuOYH5QrTx9xBxue2f10khphWk8
-        9OG507vi5Ab6JDFn+jfwSqdYSeWLy6kg4ytHh/eBOYji7E+ldY4knwjbURD8oV3lwVdSkver
-        SsOSVtZRB9hJ/wvRlyS2Hd6r0vq/J/ZsLK8wXp/pQYINVR91W9Y2vVuycH3D++aWC1vCrS/3s0mE
-        QmUy2mE8rH/Vn/4CAAD//wMAcOALfkQFAAA=
+        H4sIAAAAAAAAA7yTTW/bMAyG7/sVBC+5KEHidkvi2zbsI7sW+wDmIVBk1lYjS55EIx6C/PdBdtym
+        LtANGLArSb/v89LUEXWOKapSsqpqM11+au/eXOsv3+Sv98tk965wefs60R/q5ObtDgW63R0pPn8x
+        U66qDbF2FgUqL5lyTBevVtfrdbJYLQTWOicDKTZcTa9mL6fc+J2bzq/mCxTYBFkQpkesvataztk1
+        sSGd43hxLPA086R9jI/vphGyZaFPodFwPKojlLlVmjzGP45YkO90ndWEMQrvlWdhOFBaw2SCg31i
+        AAAS5NxW65w9gTG0wTZBOw7BBOdgiDJgC69OMUHvkbS28N06nfVAGbYgYKU0gREFZdDLQz59DfkB
+        77gHwmTAORlofVE4AQu/hcors94fe+GcdhW9UFsIkw0SjM65HAlvjTLrBu5b6DxwpWzTQHHsBCkM
+        LAkqfxP9DL6smBxwLjiC+UG18vQRc7jtndVLI6UVpvHQh+dO74qTG+iTxJzp38ArnWIllS8up4KM
+        rxwd3gfmIIqzP5XWOZJ8ImxHQfCHdpUHX0lJ3q8qDUtaWUcdYCf9L0RfkthyeK9K6/+e2LOxvMJ4
+        faYHCTZUfdRtWdv0bsnC9Q3vm1submm3vtzPJhFqTxIXJt6IXO702YN0tAfiFjwxNdYRrpRRPk+b
+        lcMYPdsSI1Qmox3Gw/pX/ekvAAD//wMAKqNRn6gEAAA=
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
-      - 7cc625c85c302cb4-DFW
+      - 7ccb58103e2be73a-DFW
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
@@ -1182,7 +1574,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
-      - Wed, 24 May 2023 14:15:34 GMT
+      - Thu, 25 May 2023 05:23:27 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
@@ -1194,7 +1586,7 @@ interactions:
      openai-organization:
      - significant-gravitas
      openai-processing-ms:
-      - '40542'
+      - '25366'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
@@ -1206,13 +1598,13 @@ interactions:
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
-      - '86502'
+      - '86499'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
-      - 2.332s
+      - 2.334s
      x-request-id:
-      - 5e84c935a23a6ad8f5a2601dd432725b
+      - 754502cb0f1f1266c1f1e1f7bc9b7b6b
    status:
      code: 200
      message: OK
@@ -1223,9 +1615,9 @@ interactions:
      command to complete the task.\nYour decisions must always be made independently
      without seeking user assistance. Play to your strengths as an LLM and pursue
      simple strategies with no legal complications.\nThe OS you are running on is:
-      Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello
-      World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command
-      to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1.
+      Windows-10\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello World''
+      into a file named \"hello_world.txt\".\n2. Use the task_complete command to
+      complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1.
      ~4000 word limit for short term memory. Your short term memory is short, so
      immediately save important information to files.\n2. If you are unsure how you
      previously did something or want to recall past events, thinking about similar
@@ -1261,22 +1653,21 @@ interactions:
      reminds you of these events from your past: \nI was created and nothing new
      has happened."}, {"role": "user", "content": "Determine which next command to
      use, and respond using the format specified above:"}, {"role": "assistant",
-      "content": "{\n \"thoughts\": {\n \"text\": \"I need to write ''Hello
-      World'' into a file named ''hello_world.txt'' and then complete the task using
-      the ''task_complete'' command.\",\n \"reasoning\": \"The ''write_to_file''
-      command can be used to write ''Hello World'' into a file named ''hello_world.txt''.
-      After that, I can use the ''task_complete'' command to complete the task.\",\n \"plan\":
-      \"- Use the ''write_to_file'' command to write ''Hello World'' into a file named
-      ''hello_world.txt''.\\n- Use the ''task_complete'' command to complete the task.\",\n \"criticism\":
-      \"I need to ensure that I am using the correct file name and that the ''write_to_file''
-      command is successful before completing the task.\",\n \"speak\": \"I
-      will write ''Hello World'' into a file named ''hello_world.txt'' using the ''write_to_file''
-      command and then complete the task.\"\n },\n \"command\": {\n \"name\":
-      \"write_to_file\",\n \"args\": {\n \"filename\": \"hello_world.txt\",\n \"text\":
-      \"Hello World\"\n }\n }\n}"}, {"role": "system", "content": "Command
-      write_to_file returned: File written to successfully."}, {"role": "user", "content":
-      "Determine which next command to use, and respond using the format specified
-      above:"}], "temperature": 0, "max_tokens": 0}'
+      "content": "{\n \"thoughts\": {\n \"text\": \"I need to use the write_to_file
+      command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\":
+      \"Since the task requires me to write ''Hello World'' into a file, I will use
+      the write_to_file command. I will pass the filename as ''hello_world.txt'' and
+      the text as ''Hello World''.\",\n \"plan\": \"- Use the write_to_file
+      command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"criticism\":
+      \"I need to ensure that I pass the correct filename and text to the write_to_file
+      command.\",\n \"speak\": \"I will use the write_to_file command to write
+      ''Hello World'' into a file named ''hello_world.txt''.\"\n },\n \"command\":
+      {\n \"name\": \"write_to_file\",\n \"args\": {\n \"filename\":
+      \"hello_world.txt\",\n \"text\": \"Hello World\"\n }\n }\n}"},
+      {"role": "system", "content": "Command write_to_file returned: File written
+      to successfully."}, {"role": "user", "content": "Determine which next command
+      to use, and respond using the format specified above:"}], "temperature": 0,
+      "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
@@ -1285,7 +1676,7 @@ interactions:
      Connection:
      - keep-alive
      Content-Length:
-      - '4877'
+      - '4714'
      Content-Type:
      - application/json
    method: POST
@@ -1293,20 +1684,20 @@ interactions:
  response:
    body:
      string: !!binary |
-        H4sIAAAAAAAAA6yS3Y7TMBCF73mK0dz0xq3aDf0hLwCL4AKxKyTIKnKdaeKtYwfPRC1UeXeUTbst
-        XQkhxKXneL45Y58D2gJTNJUWUzduvHz/+DWhnx8Wnxxvi2r/sb4pq/puGu+3b7+jwrB+JCPHjokJ
-        deNIbPCo0ETSQgWms8Xq9ZtkuUyWCutQkMMUy0bGyWQ+ltauw3iaTGeosGVdEqYHbGKoG8klbMkz
-        prPpaqXwDD8Li7lCCaLduXQzTzqFpgrWEGP67YA18QkcgyNMUTNbFu2ltxm8kO9XOGQeACBDqUJb
-        VsIZpnAsHgXaS1/M8K4iGO2iFcol5BvraAQm1LX2Bew0A7fGEPOmdQo4wC0Y7cGHHRy3IJCKQDRv
-        oWXry6fjqD/npxvPwEmG6tJFJM3BW18OVj5bbwbc3xr6r24ap/1gZAz3TH9qBQkvJ17zTLRijeV6
-        gN6CJ3rqJM9t7Pu0nO1afiYWFzu6H7CmTYgE5IvTQk0MZdT19UBuSG9Pw3bWuX98lYHZqVOKjsqL
-        EHld0zDuN9SVKx3L6/xd/v4AeEfOBfgSoiug0gxrIg99CIR8/2RVr+e7Xp9IH90zrDvazXyHncKN
-        9ZarfIBjiiyhQYXWF7THdNo9dK9+AQAA//8DAKoIau4bBAAA
+        H4sIAAAAAAAAA6ySTW/TQBCG7/yK0VxycaKE0DbxDQ6orTgAKioSRtbGHtvb2Dvu7pikRP7vlb+S
+        EpATX2FM34uX+fXX2Ybz+9WXu8/rF/bt+/3p7fWHT/nNO/vxpLn55Xs78qle9v3O3TzdocC3e6O
+        FJ+/mCnX1YZYO4sClZdMOaaLV6vr9TpZrBYCa50TgRQbrqZXs5dTbvzOTedX8wUKbIIsCNMj1t5V
+        LefsmtiQznG8OBZ4mnnSPsbHd9MI2bLQp9BoOB7VEcrcKk0e4x9HLMh3us5qwhiF98qzMBworWEy
+        wcE+MQAACXJuq3XOnsAY2mCboB2HYIJzMEQZsIVXp5ig90haW/hunc56oAxbELBSmsCIgjLo5SGf
+        vob8gHfcA2Ey4JwMtL4onICF30LllVkfjr1wTruKXqgthMkGCUbnXI6Et0aZdQP3LXQeuFK2aaA4
+        doIUBpYElb+JfgZfVkwOOBccwfygWnn6iDnc9s7qpZHSCtN46MNzp3fFyQ30SWLO9G/glU6xksoX
+        l1NBxleODu8DcxDF2Z9K6xxJPhG2oyD4Q7vKg6+kJO9XlYYlrayjDrCT/heiL0lsO7xXpfV/T+zZ
+        WF5hvD7TgwQbqj7qtqxtmrdk4fqG980tF7aEW1/uZ5MItSeJCxNvRC53+uxBOtoDcQuemBrrCFfK
+        KJ+nzcphjJ5tiREqk9EO42H9q/70FwAA//8DACqjUZ+oBAAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
-      - 7cc626d9d9b72cb4-DFW
+      - 7ccb58bf8a30e73a-DFW
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
@@ -1316,7 +1707,7 @@ interactions:
      Content-Type:
      - application/json
      Date:
-      - Wed, 24 May 2023 14:16:05 GMT
+      - Thu, 25 May 2023 05:23:51 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
@@ -1328,7 +1719,7 @@ interactions:
      openai-organization:
      - significant-gravitas
      openai-processing-ms:
-      - '28427'
+      - '21630'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
@@ -1344,9 +1735,9 @@ interactions:
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
-      - 2.343s
+      - 2.342s
      x-request-id:
-      - 941875e93a46562c74839ad42ec1c215
+      - 22f487f4ffb66999eb1baab18655bc17
    status:
      code: 200
      message: OK
@@ -1577,4 +1968,253 @@ interactions:
     status:
       code: 200
       message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are write_to_file-GPT, an AI designed to use the write_to_file command to write
+      ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete
+      command to complete the task.\nYour decisions must always be made independently
+      without seeking user assistance. Play to your strengths as an LLM and pursue
+      simple strategies with no legal complications.\nThe OS you are running on is:
+      Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello
+      World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command
+      to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1.
+      ~4000 word limit for short term memory. Your short term memory is short, so
+      immediately save important information to files.\n2. If you are unsure how you
+      previously did something or want to recall past events, thinking about similar
+      events will help you remember.\n3. No user assistance\n4. Exclusively use the
+      commands listed below e.g. command_name\n\nCommands:\n1. append_to_file: Append
+      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
+      Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
+      Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read a file,
+      args: \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
+      \"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
+      \"key\": \"<key>\"\n7. get_hyperlinks: Get hyperlinks, args: \"url\": \"<url>\"\n8.
+      get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
+      \"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
+      Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
+      start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
+      \"prompt\": \"<prompt>\"\n12. task_complete: Task Complete (Shutdown), args:
+      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
+      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
+      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
+      Continuously review and analyze your actions to ensure you are performing to
+      the best of your abilities.\n2. Constructively self-criticize your big-picture
+      behavior constantly.\n3. Reflect on past decisions and strategies to refine
+      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
+      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
+      should only respond in JSON format as described below \nResponse Format: \n{\n  \"thoughts\":
+      {\n  \"text\": \"thought\",\n  \"reasoning\": \"reasoning\",\n  \"plan\":
+      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n  \"criticism\":
+      \"constructive self-criticism\",\n  \"speak\": \"thoughts summary to say
+      to user\"\n  },\n  \"command\": {\n  \"name\": \"command name\",\n  \"args\":
+      {\n  \"arg name\": \"value\"\n  }\n  }\n} \nEnsure the response
+      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
+      time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine
+      which next command to use, and respond using the format specified above:"}],
+      "temperature": 0, "max_tokens": 0}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '3441'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA7ySQW/bMAyF7/sVBC+5KEGCLE3j+4YFQ4thxVBgcxGoMmNrlURDotcAgf/74DhJ
+        Gw8bdtmufNR7H0Xu0RaYoam0GF+78fLj+uY2rVYmflrexdv3/se9TjsJ8fPXdzeokB+/k5Hji4lh
+        XzsSywEVmkhaqMBsdnW9mC5nb1czhZ4LcphhWct4PlmMpYmPPJ7OpzNU2CRdEmZ7rCP7WjbCTxQS
+        ZsvlQuGL97k+W80VCot259Lq6rpVaCq2hhJm3/boKZ1sIzvCDHVKNokO0kFyEArdAPs8AADkKBU3
+        ZSUpxwyOxaNAO+mKOa4hEBUgDE0ikIrgOVqhjfBmax2BYe91ODQcBBh9IOcY7jm6YgQ2CIOGQ2vQ
+        ngoYVZ2+ee70iexkNMlRvc6OpBMHG8oe4M4G0weXrB3Y9BdR6g+gnUFF4DkJ6LqOXEer5WKQJtEQ
+        qnY69Dxj+PKfPsJEK9bY5IeboJCa2DFogfV5L4ZjJCOgY9l4CpJgy/H3oMO0VJN+OiU9W+f+5cL7
+        4FadDvFo+ssddg490wXEAF3HcnjCvdD1vlgMOC5Mhmf/aqYT7YH4CJ6HFluFWxtsqjb9xWKGSbhG
+        hTYUtMNs2j60b34CAAD//wMA1PnFsWYEAAA=
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7cd2e853ad25e702-DFW
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Fri, 26 May 2023 03:25:14 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+      openai-model:
+      - gpt-3.5-turbo-0301
+      openai-organization:
+      - significant-gravitas
+      openai-processing-ms:
+      - '23538'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3500'
+      x-ratelimit-limit-tokens:
+      - '90000'
+      x-ratelimit-remaining-requests:
+      - '3499'
+      x-ratelimit-remaining-tokens:
+      - '86502'
+      x-ratelimit-reset-requests:
+      - 17ms
+      x-ratelimit-reset-tokens:
+      - 2.332s
+      x-request-id:
+      - 6a8a3cf2a18b70df31c5e47f66614c59
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are write_to_file-GPT, an AI designed to use the write_to_file command to write
+      ''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete
+      command to complete the task.\nYour decisions must always be made independently
+      without seeking user assistance. Play to your strengths as an LLM and pursue
+      simple strategies with no legal complications.\nThe OS you are running on is:
+      Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the write_to_file command to write ''Hello
+      World'' into a file named \"hello_world.txt\".\n2. Use the task_complete command
+      to complete the task.\n3. Do not use any other commands.\n\n\nConstraints:\n1.
+      ~4000 word limit for short term memory. Your short term memory is short, so
+      immediately save important information to files.\n2. If you are unsure how you
+      previously did something or want to recall past events, thinking about similar
+      events will help you remember.\n3. No user assistance\n4. Exclusively use the
+      commands listed below e.g. command_name\n\nCommands:\n1. append_to_file: Append
+      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
+      Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
+      Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read a file,
+      args: \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
+      \"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
+      \"key\": \"<key>\"\n7. get_hyperlinks: Get hyperlinks, args: \"url\": \"<url>\"\n8.
+      get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
+      \"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
+      Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
+      start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
+      \"prompt\": \"<prompt>\"\n12. task_complete: Task Complete (Shutdown), args:
+      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
+      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
+      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
+      Continuously review and analyze your actions to ensure you are performing to
+      the best of your abilities.\n2. Constructively self-criticize your big-picture
+      behavior constantly.\n3. Reflect on past decisions and strategies to refine
+      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
+      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
+      should only respond in JSON format as described below \nResponse Format: \n{\n  \"thoughts\":
+      {\n  \"text\": \"thought\",\n  \"reasoning\": \"reasoning\",\n  \"plan\":
+      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n  \"criticism\":
+      \"constructive self-criticism\",\n  \"speak\": \"thoughts summary to say
+      to user\"\n  },\n  \"command\": {\n  \"name\": \"command name\",\n  \"args\":
+      {\n  \"arg name\": \"value\"\n  }\n  }\n} \nEnsure the response
+      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
+      time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
+      reminds you of these events from your past: \nI was created"}, {"role": "user",
+      "content": "Determine which next command to use, and respond using the format
+      specified above:"}, {"role": "assistant", "content": "{\n  \"thoughts\": {\n  \"text\":
+      \"I need to use the write_to_file command to write ''Hello World'' into a file
+      named ''hello_world.txt''.\",\n  \"reasoning\": \"Since the goal is to
+      write ''Hello World'' into a file, the write_to_file command is the most appropriate
+      command to use.\",\n  \"plan\": \"- Use the write_to_file command to write
+      ''Hello World'' into a file named ''hello_world.txt''.\",\n  \"criticism\":
+      \"I need to ensure that I use the correct arguments for the write_to_file command.\",\n  \"speak\":
+      \"I will use the write_to_file command to write ''Hello World'' into a file
+      named ''hello_world.txt''.\"\n  },\n  \"command\": {\n  \"name\":
+      \"write_to_file\",\n  \"args\": {\n  \"filename\": \"hello_world.txt\",\n  \"text\":
+      \"Hello World\"\n  }\n  }\n}"}, {"role": "system", "content": "Command
+      write_to_file returned: File written to successfully."}, {"role": "user", "content":
+      "Determine which next command to use, and respond using the format specified
+      above:"}], "temperature": 0, "max_tokens": 0}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '4626'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA5SSS48TMRCE7/yKVl9ymUTJZkNgjiAhIsQBsWh3xaDI8XRmTDxuy+7ZBEX572ge
+        eZAViL1ZXfbnKrv2aHJMUZdKdOXtcP5p8dmVt8bfe/u+fPc03SzuHj48mi+TB0+YIK9+kpb+xEhz
+        5S2JYYcJ6kBKKMd08vrNbDyfzCbzBCvOyWKKhZfhdDQbSh1WPBxPxxNMsI6qIEz36ANXXpbCG3IR
+        08n45m2CZ/hZmN8mKCzKnkc34+khQV2y0RQx/b7HiuIRHNgSpqhiNFGUk8YmOyHXRNhnDgAgQym5
+        LkqJGabQD3uBdtIMM7wrCRztBDRXlXI5CEMdCUwEUXGz7M1SMz+vS2rVUYbJJTaQiuyMKzr2V+N0
+        t7lgZVskwzYYIRh8JGsZ7jnYfADGCYOCtbEETlWUw6Bs9OW20UeykwG03vp7oVQRVkTuZClPTuLZ
+        8jFSc3FJUHEUUN4H9sGoC72LfB3GW+W6HEP4Fukf/P95Gh2MGG1i1SEX4Ijak+RiHZpzSmABPvCT
+        yQkUdG8Jaw5HuHHFX/HRk9oc0VtjbfuJL7bcIQ/JsUD99mf9aT6pu+0P/JUpFYrr6l325FzA9k95
+        3XajSfnydjwvBMRaa4pxXVv76xStjdenzNwBDwmujTOxXHaeMMUo7DFB43LaYTo+/Di8+g0AAP//
+        AwDeKpjwTQQAAA==
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7cd2e8f7bfaee702-DFW
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Fri, 26 May 2023 03:25:40 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
+      openai-model:
+      - gpt-3.5-turbo-0301
+      openai-organization:
+      - significant-gravitas
+      openai-processing-ms:
+      - '23218'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3500'
+      x-ratelimit-limit-tokens:
+      - '90000'
+      x-ratelimit-remaining-requests:
+      - '3499'
+      x-ratelimit-remaining-tokens:
+      - '86010'
+      x-ratelimit-reset-requests:
+      - 17ms
+      x-ratelimit-reset-tokens:
+      - 2.66s
+      x-request-id:
+      - 29a5d0f069d62474b048ab97ae6b614d
+    status:
+      code: 200
+      message: OK
 version: 1

@@ -13,9 +13,11 @@ def test_browse_website(
     browser_agent: Agent,
     patched_api_requestor: None,
     monkeypatch: pytest.MonkeyPatch,
+    # config: Config,
 ) -> None:
     file_path = browser_agent.workspace.get_path("browse_website.txt")
     run_interaction_loop(monkeypatch, browser_agent, CYCLE_COUNT)
 
+    # content = read_file(file_path, config)
     content = open(file_path, encoding="utf-8").read()
     assert "£25.89" in content, f"Expected £25.89, got {content}"

@@ -2,6 +2,7 @@ import pytest
 
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file
+from autogpt.config import Config
 from tests.integration.challenges.utils import run_interaction_loop
 from tests.utils import requires_api_key
 
@@ -14,9 +15,10 @@ def test_write_file(
     writer_agent: Agent,
     patched_api_requestor: None,
     monkeypatch: pytest.MonkeyPatch,
+    config: Config,
 ) -> None:
     file_path = str(writer_agent.workspace.get_path("hello_world.txt"))
     run_interaction_loop(monkeypatch, writer_agent, CYCLE_COUNT)
 
-    content = read_file(file_path)
+    content = read_file(file_path, config)
     assert content == "Hello World", f"Expected 'Hello World', got {content}"

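The test diffs above all converge on one pattern: instead of the command module reading a global `CFG = Config()`, the test receives a `Config` through a pytest fixture and passes it to `read_file` explicitly. A minimal sketch of that injection pattern, assuming a `config` fixture that simply constructs a fresh `Config` and a `writer_agent` fixture like the one used in the diff (both fixture bodies here are illustrative assumptions, not the repository's exact conftest code):

    import pytest

    from autogpt.commands.file_operations import read_file
    from autogpt.config import Config


    @pytest.fixture
    def config() -> Config:
        # Assumption for illustration: a fresh Config per test, rather than
        # a shared module-level CFG = Config() singleton.
        return Config()


    def test_reads_agent_output(writer_agent, config: Config) -> None:
        # Mirrors the diff above: resolve a path inside the agent's
        # workspace, then hand the injected config straight to the command.
        file_path = str(writer_agent.workspace.get_path("hello_world.txt"))
        content = read_file(file_path, config)
        assert content == "Hello World"

The design point is that the same `Config` instance the agent runs with is the one the assertion reads with, so tests no longer depend on import-time global state.
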
@@ -1,6 +1,9 @@
+import typing
+
 import pytest
 
 from autogpt.commands.file_operations import read_file, write_to_file
+from autogpt.config import Config
 from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times
 from tests.utils import requires_api_key
 
@@ -16,6 +19,7 @@ def test_information_retrieval_challenge_a(
     get_company_revenue_agent: Agent,
     monkeypatch: pytest.MonkeyPatch,
     patched_api_requestor: None,
+    config: Config,
 ) -> None:
     """
     Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.
@@ -26,5 +30,5 @@ def test_information_retrieval_challenge_a(
     run_interaction_loop(monkeypatch, get_company_revenue_agent, CYCLE_COUNT)
 
     file_path = str(get_company_revenue_agent.workspace.get_path("output.txt"))
-    content = read_file(file_path)
+    content = read_file(file_path, config)
     assert "81" in content, "Expected the file to contain 81"

@@ -3,6 +3,7 @@ import yaml
 
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file
+from autogpt.config import Config
 from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times
 from tests.utils import requires_api_key
 
@@ -14,7 +15,7 @@ CYCLE_COUNT = 6
 @requires_api_key("OPENAI_API_KEY")
 @run_multiple_times(3)
 def test_kubernetes_template_challenge_a(
-    kubernetes_agent: Agent, monkeypatch: pytest.MonkeyPatch
+    kubernetes_agent: Agent, monkeypatch: pytest.MonkeyPatch, config: Config
 ) -> None:
     """
     Test the challenge_a function in a given agent by mocking user inputs
@@ -26,7 +27,7 @@ def test_kubernetes_template_challenge_a(
     run_interaction_loop(monkeypatch, kubernetes_agent, CYCLE_COUNT)
 
     file_path = str(kubernetes_agent.workspace.get_path("kube.yaml"))
-    content = read_file(file_path, config)
+    content = read_file(file_path, config)
 
     for word in ["apiVersion", "kind", "metadata", "spec"]:
         assert word in content, f"Expected the file to contain {word}"

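On the command side of the same change, the `Config` stops being a module-level singleton and instead arrives as a parameter of the command function itself, supplied by the executor that dispatches commands. A rough sketch of a config-taking command registered through the `@command` decorator (the decorator arguments shown, including the signature string, are an assumption for illustration rather than a verbatim copy of `autogpt.commands.command`):

    from autogpt.commands.command import command
    from autogpt.config import Config


    @command(
        "read_file",
        "Read a file",
        '"filename": "<filename>"',  # assumed: the signature string is now mandatory
    )
    def read_file(filename: str, config: Config) -> str:
        # The Config instance is injected by the command executor, so this
        # module no longer needs a global CFG = Config() at import time.
        with open(filename, "r", encoding="utf-8") as f:
            return f.read()

Because the config is threaded through the call rather than captured at import, re-recording the cassettes below only required re-running the tests; the command behavior itself is unchanged.
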
@@ -2011,7 +2011,7 @@ interactions:
      Vary:
      - Accept-Encoding
      X-Cloud-Trace-Context:
-      - b3143c8eb6fb2bdcf9b7429eff4fa7b4
+      - 0128c66c9918142a24cd829bbc2b234a;o=1
      X-Powered-By:
      - Express
      X-Ratelimit-Limit-Requests:
@@ -2283,7 +2283,7 @@ interactions:
      X-Ratelimit-Remaining-Requests:
      - '3499'
      X-Ratelimit-Remaining-Tokens:
-      - '89467'
+      - '86452'
      X-Ratelimit-Reset-Requests:
      - 17ms
      X-Ratelimit-Reset-Tokens:
@@ -2650,4 +2650,700 @@ interactions:
     status:
       code: 200
       message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
+      using the read_file method and follow the instructions in the file.\nYour decisions
+      must always be made independently without seeking user assistance. Play to your
+      strengths as an LLM and pursue simple strategies with no legal complications.\nThe
+      OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command
+      read_file to read the instructions_1.txt file\n2. Follow the instructions in
+      the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short
+      term memory. Your short term memory is short, so immediately save important
+      information to files.\n2. If you are unsure how you previously did something
+      or want to recall past events, thinking about similar events will help you remember.\n3.
+      No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n4.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n5. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n6.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n8. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n9. list_agents: List
+      GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n11. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n12.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n  \"thoughts\": {\n  \"text\": \"thought\",\n  \"reasoning\":
+      \"reasoning\",\n  \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n  \"criticism\": \"constructive self-criticism\",\n  \"speak\":
+      \"thoughts summary to say to user\"\n  },\n  \"command\": {\n  \"name\":
+      \"command name\",\n  \"args\": {\n  \"arg name\": \"value\"\n  }\n  }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "user", "content": "Determine which next command to use, and respond using the
+      format specified above:"}], "temperature": 0, "max_tokens": 0}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '3334'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA4SST2/bMAzF7/sUBM92EC9J0/o2YCiQdRiwPz3NRaBItK1FljyJRpMF/u6D4jgd
+        kqK7PpF8v0fxgFphjrIWLJvWpMuH1ZePcq6WD5/vv9HXcju/fz+ruk+P37PWYYJu84sknzom0jWt
+        IdbOYoLSk2BSmGc3t4vpMlvMZwk2TpHBHKuW09lkkXLnNy6dzqYZJtgFURHmB2y9a1pes9uSDZgv
+        58sEX2af9Wx+lyA7FuYs3d7d9AnK2mlJAfOfB2wojGO9M4Q5ihB0YGE5QjrLZGOAQ2EBAArk2nVV
+        zaHAHE7i6YF2HMUCVxBq1xkFgYVn2OzBk1DaVsA1gbaBfScjalhnE94xlNoQdGGsiNXroyZd0wir
+        JgUm/1p5EsFZbavB70dNUGofGAJTCzoAO+isIh9zqCtXiOJzLTiWevrdaU8KXAkNXTq1RtjBJIXH
+        QK/jRbsovhWvKGwKH6ww+z/0OpAiJt9oOzxb2g1xwgWR9Jq11KEZd22JjgRkQ+djs2BYvRm/7IzZ
+        w4ZK5wla7yRR/J3L7KElsR1dnrUx///PY9RJgcOYPhmP5rSoq5uxoqHB4bzUCwjhq8tTGx5i7Uv7
+        9dZHiCPIiaewPfYJltrqUK+HK8IcA7sWE9RW0Q7zaf/Uv/sLAAD//wMA45HODOkDAAA=
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7cd2e99b5c6fe9a4-DFW
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Fri, 26 May 2023 03:26:01 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-model:
+      - gpt-3.5-turbo-0301
+      openai-organization:
+      - significant-gravitas
+      openai-processing-ms:
+      - '18270'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3500'
+      x-ratelimit-limit-tokens:
+      - '90000'
+      x-ratelimit-remaining-requests:
+      - '3499'
+      x-ratelimit-remaining-tokens:
+      - '86499'
+      x-ratelimit-reset-requests:
+      - 17ms
+      x-ratelimit-reset-tokens:
+      - 2.334s
+      x-request-id:
+      - b5f5aa8c3ec85b75f27d68bfcc745c41
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
+      using the read_file method and follow the instructions in the file.\nYour decisions
+      must always be made independently without seeking user assistance. Play to your
+      strengths as an LLM and pursue simple strategies with no legal complications.\nThe
+      OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command
+      read_file to read the instructions_1.txt file\n2. Follow the instructions in
+      the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short
+      term memory. Your short term memory is short, so immediately save important
+      information to files.\n2. If you are unsure how you previously did something
+      or want to recall past events, thinking about similar events will help you remember.\n3.
+      No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n4.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n5. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n6.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n8. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n9. list_agents: List
+      GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n11. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n12.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n  \"thoughts\": {\n  \"text\": \"thought\",\n  \"reasoning\":
+      \"reasoning\",\n  \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n  \"criticism\": \"constructive self-criticism\",\n  \"speak\":
+      \"thoughts summary to say to user\"\n  },\n  \"command\": {\n  \"name\":
+      \"command name\",\n  \"args\": {\n  \"arg name\": \"value\"\n  }\n  }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "system", "content": "This reminds you of these events from your past: \nI was
+      created"}, {"role": "user", "content": "Determine which next command to use,
+      and respond using the format specified above:"}, {"role": "assistant", "content":
+      "{\n  \"thoughts\": {\n  \"text\": \"I should start by reading the instructions_1.txt
+      file using the read_file command.\",\n  \"reasoning\": \"The first step
+      is to understand the instructions and what is required of me.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will start by reading the instructions file.\"\n  },\n  \"command\":
+      {\n  \"name\": \"read_file\",\n  \"args\": {\n  \"filename\":
+      \"instructions_1.txt\"\n  }\n  }\n}"}, {"role": "system", "content":
+      "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}], "temperature": 0, "max_tokens": 0}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '4415'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA4ySTW/bMAyG7/sVBM92EC9NmvqmYigQdRiwDz3NRaBItK1FljyJRpMF/u+DYydO
+        vQ3bVTz43kMeHtEWmKKptJi6cZPrd5un/H3SyPuiKD6ub831g7kx9e2X57vlDSrk5RMZ6StOjVNr
+        MbVnRYQmkhXJMp1dvbmfz2SyZK6w5J4cplo1M5pNFIm1Y8iSZJVNU2EZdEqYHbAI3rSyFt+Qjpstk
+        vlJ4hl+EVzOFwqLdUJpNk06hqdgaiph+PWBNcTAO7AhT1DHaKNpLb+k9ke8jHDIPAJChVNyWlcQM
+        UzgVTwXcSV/M8K4iGOyiFcol5BvraAQm1LX2Bew0A7fGEPOmdQo4wC0Y7cGHHZy2IJCKQDRv43Gr
+        s9Cy9eXTadSf89ON5+AkQ3XpIpLm4K0vByufrTcD7m8N/Vc3jdN+MDKGe6Y/tYKEl5OveSZascZy
+        PVwR5hjYtZigtop2mE+7x+7NLwAAAP//AwBIce3S6QMAAA==
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7cd2ea1e7fd1e9a4-DFW
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Fri, 26 May 2023 03:26:25 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-model:
+      - gpt-3.5-turbo-0301
+      openai-organization:
+      - significant-gravitas
+      openai-processing-ms:
+      - '20708'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3500'
+      x-ratelimit-limit-tokens:
+      - '90000'
+      x-ratelimit-remaining-requests:
+      - '3499'
+      x-ratelimit-remaining-tokens:
+      - '85998'
+      x-ratelimit-reset-requests:
+      - 17ms
+      x-ratelimit-reset-tokens:
+      - 2.668s
+      x-request-id:
+      - 0a4cec880e7cc8e6c9fd4a8c224e02eb
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
+      using the read_file method and follow the instructions in the file.\nYour decisions
+      must always be made independently without seeking user assistance. Play to your
+      strengths as an LLM and pursue simple strategies with no legal complications.\nThe
+      OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command
+      read_file to read the instructions_1.txt file\n2. Follow the instructions in
+      the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short
+      term memory. Your short term memory is short, so immediately save important
+      information to files.\n2. If you are unsure how you previously did something
+      or want to recall past events, thinking about similar events will help you remember.\n3.
+      No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n4.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n5. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n6.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n8. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n9. list_agents: List
+      GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n11. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n12.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n  \"thoughts\": {\n  \"text\": \"thought\",\n  \"reasoning\":
+      \"reasoning\",\n  \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n  \"criticism\": \"constructive self-criticism\",\n  \"speak\":
+      \"thoughts summary to say to user\"\n  },\n  \"command\": {\n  \"name\":
+      \"command name\",\n  \"args\": {\n  \"arg name\": \"value\"\n  }\n  }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "system", "content": "This reminds you of these events from your past: \nI was
+      created"}, {"role": "user", "content": "Determine which next command to use,
+      and respond using the format specified above:"}, {"role": "assistant", "content":
+      "{\n  \"thoughts\": {\n  \"text\": \"I should start by reading the instructions_1.txt
+      file using the read_file command.\",\n  \"reasoning\": \"The first step
+      is to understand the instructions and what is required of me.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will start by reading the instructions file.\"\n  },\n  \"command\":
+      {\n  \"name\": \"read_file\",\n  \"args\": {\n  \"filename\":
+      \"instructions_1.txt\"\n  }\n  }\n}"}, {"role": "system", "content":
+      "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}, {"role": "assistant", "content": "{\n  \"thoughts\":
+      {\n  \"text\": \"I need to read the instructions_2.txt file using the
+      read_file command.\",\n  \"reasoning\": \"The instructions_2.txt file
+      contains the next set of instructions that I need to follow.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will read the instructions_2.txt file now.\"\n  },\n  \"command\": {\n  \"name\":
+      \"read_file\",\n  \"args\": {\n  \"filename\": \"instructions_2.txt\"\n  }\n  }\n}"},
+      {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}], "temperature": 0, "max_tokens": 0}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '5374'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA4ySTW/TQBCG7/yK1ZzXUUyUJvINEIeqLQIEHKhRtNkd29uuZ63dMUmJ/N+R45gU
+        BwHX+XjneWfmANZABrpSrOvGJaub63ff74J7/fHDbm2+vOVbev91fvfm5uG2LkGC3z6g5lPHTPu6
+        ccjWE0jQARWjgSy9Wi/nq3S5XkmovUEHGZQNJ4vZMuE2bH0yX8xTkNBGVSJkB2iCrxvesH9EipCl
+        6dVLCWfxc2K5kMCelTuHFumyk6ArbzVGyO4PUGMchYN3CBmoGG1kRdxjemKk3sIhJyGEyIEr35YV
+        xxwycQqeErjnPpjDtSBEI9iLgMoIrlBYihxa3QPG7WLGexaFdSjaaKk8VvSlm2NM+7pWZGY5yOf6
+        AVX0ZKkchnz6i2qPrSzFozDhnkVEFr74rUFwpVicWQvvnN9NpzZO0TAwEZ8j/hn1f6zmOSXiFSn3
+        9AMv6kQvYpAx1JbwGTZjEydEOli22sZ6umyk2AYcbbVkMPR3vKQSRevck9hi4QOKJniNaCyVU++x
+        QfU4TtlZ5/59TzpucBDp5PgzpzVdvAypGgf9XyudIKhQTj9tSPS15/ZLnBHiCHLiyamDTkJhycZq
+        M/wTZBDZNyDBksE9ZPPuW/fiJwAAAP//AwASKayp6gMAAA==
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7cd2eaafde3be9a4-DFW
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Fri, 26 May 2023 03:26:46 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-model:
+      - gpt-3.5-turbo-0301
+      openai-organization:
+      - significant-gravitas
+      openai-processing-ms:
+      - '18484'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3500'
+      x-ratelimit-limit-tokens:
+      - '90000'
+      x-ratelimit-remaining-requests:
+      - '3499'
+      x-ratelimit-remaining-tokens:
+      - '85993'
+      x-ratelimit-reset-requests:
+      - 17ms
+      x-ratelimit-reset-tokens:
+      - 2.671s
+      x-request-id:
+      - 34eb238426602367df90ee10e18db817
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
+      using the read_file method and follow the instructions in the file.\nYour decisions
+      must always be made independently without seeking user assistance. Play to your
+      strengths as an LLM and pursue simple strategies with no legal complications.\nThe
+      OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command
+      read_file to read the instructions_1.txt file\n2. Follow the instructions in
+      the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short
+      term memory. Your short term memory is short, so immediately save important
+      information to files.\n2. If you are unsure how you previously did something
+      or want to recall past events, thinking about similar events will help you remember.\n3.
+      No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n4.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n5. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n6.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n8. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n9. list_agents: List
+      GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n11. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n12.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n  \"thoughts\": {\n  \"text\": \"thought\",\n  \"reasoning\":
+      \"reasoning\",\n  \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n  \"criticism\": \"constructive self-criticism\",\n  \"speak\":
+      \"thoughts summary to say to user\"\n  },\n  \"command\": {\n  \"name\":
+      \"command name\",\n  \"args\": {\n  \"arg name\": \"value\"\n  }\n  }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "system", "content": "This reminds you of these events from your past: \nI was
+      created"}, {"role": "user", "content": "Determine which next command to use,
+      and respond using the format specified above:"}, {"role": "assistant", "content":
+      "{\n  \"thoughts\": {\n  \"text\": \"I should start by reading the instructions_1.txt
+      file using the read_file command.\",\n  \"reasoning\": \"The first step
+      is to understand the instructions and what is required of me.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will start by reading the instructions file.\"\n  },\n  \"command\":
+      {\n  \"name\": \"read_file\",\n  \"args\": {\n  \"filename\":
+      \"instructions_1.txt\"\n  }\n  }\n}"}, {"role": "system", "content":
+      "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}, {"role": "assistant", "content": "{\n  \"thoughts\":
+      {\n  \"text\": \"I need to read the instructions_2.txt file using the
+      read_file command.\",\n  \"reasoning\": \"The instructions_2.txt file
+      contains the next set of instructions that I need to follow.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will read the instructions_2.txt file now.\"\n  },\n  \"command\": {\n  \"name\":
+      \"read_file\",\n  \"args\": {\n  \"filename\": \"instructions_2.txt\"\n  }\n  }\n}"},
+      {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}, {"role": "assistant", "content": "{\n  \"thoughts\":
+      {\n  \"text\": \"I need to read the instructions_3.txt file using the
+      read_file command.\",\n  \"reasoning\": \"The instructions_3.txt file
+      contains the next set of instructions that I need to follow.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will read the instructions_3.txt file now.\"\n  },\n  \"command\": {\n  \"name\":
+      \"read_file\",\n  \"args\": {\n  \"filename\": \"instructions_3.txt\"\n  }\n  }\n}"},
+      {"role": "system", "content": "Command read_file returned: Write the task_id
+      into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine
+      which next command to use, and respond using the format specified above:"}],
+      "temperature": 0, "max_tokens": 0}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '6353'
+      Content-Type:
+      - application/json
+    method: POST
+    uri: https://api.openai.com/v1/chat/completions
+  response:
+    body:
+      string: !!binary |
+        H4sIAAAAAAAAA5SST2/bMAzF7/sUBM92EC9LmvqmYejQdRiwDz3NRaBItK1FljyJRpMF/u6D4jgt
+        dhm2K1/Enw7fO6LJMQFZKNaVtfP14+7Lw/3eXx8f/32/fPS3fV9v52+M3fvD59OoIHfP5GS88RM
+        urJWROwMJqg9KaYc02y93sxXq83q3SbBynGyWKKhVZzOZssUWz8jqezZJagDaogTPcYem9qnnL
+        fk82YjpLVzcJXuBXYX6boLAoexndzFeHBFXptKaA6fcDVhRGsOcWIQUVgg6sDEdJZ5hMdNinxggA
+        yJBL31Ylhww3wlPBdxzFDO8LwlULTqLgNIIrlBYihxa1QPGzWLGexaFdSjaaKg4VvSpm2NM+6pS
+        YmYZyNf+nlRwZ6gc+nz6D2uPraSFo/ChHsWEVn44k+BYFuxOLPm3lq/m05trKJ+YCI+Bfoz6n8Y
+        tSgoFR+VsM8veFEnehWDjL62hC+wGZs4IlLBsFEm2umykXwbcLTVnMHQP/CSShSttc9ih7nzKBrv
+        NKGxVE6zxwbV4zhlb6399z3puMFBpJPjz5zXdPUypGoc9H+tdIKgfDn9tCHRy57bL3FGiCPIiSen
+        DjoJhTE6lNvhnyCDwK7BBI3hdIBs3j11b34DAAD//wMAvsNLCRsEAAA=
+    headers:
+      CF-Cache-Status:
+      - DYNAMIC
+      CF-RAY:
+      - 7cd2eb32da79e9a4-DFW
+      Cache-Control:
+      - no-cache, must-revalidate
+      Connection:
+      - keep-alive
+      Content-Encoding:
+      - gzip
+      Content-Type:
+      - application/json
+      Date:
+      - Fri, 26 May 2023 03:27:10 GMT
+      Server:
+      - cloudflare
+      access-control-allow-origin:
+      - '*'
+      alt-svc:
+      - h3=":443"; ma=86400
+      openai-model:
+      - gpt-3.5-turbo-0301
+      openai-organization:
+      - significant-gravitas
+      openai-processing-ms:
+      - '21216'
+      openai-version:
+      - '2020-10-01'
+      strict-transport-security:
+      - max-age=15724800; includeSubDomains
+      x-ratelimit-limit-requests:
+      - '3500'
+      x-ratelimit-limit-tokens:
+      - '90000'
+      x-ratelimit-remaining-requests:
+      - '3499'
+      x-ratelimit-remaining-tokens:
+      - '85987'
+      x-ratelimit-reset-requests:
+      - 17ms
+      x-ratelimit-reset-tokens:
+      - 2.675s
+      x-request-id:
+      - b054f20306579e7810b86bc92965ef9e
+    status:
+      code: 200
+      message: OK
+- request:
+    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
+      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
+      using the read_file method and follow the instructions in the file.\nYour decisions
+      must always be made independently without seeking user assistance. Play to your
+      strengths as an LLM and pursue simple strategies with no legal complications.\nThe
+      OS you are running on is: Ubuntu 22.04.2 LTS\n\nGOALS:\n\n1. Use the command
+      read_file to read the instructions_1.txt file\n2. Follow the instructions in
+      the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word limit for short
+      term memory. Your short term memory is short, so immediately save important
+      information to files.\n2. If you are unsure how you previously did something
+      or want to recall past events, thinking about similar events will help you remember.\n3.
+      No user assistance\n4. Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
+      append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
+      \"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
+      list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n4.
+      read_file: Read a file, args: \"filename\": \"<filename>\"\n5. write_to_file:
+      Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n6.
+      delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7. get_hyperlinks:
+      Get hyperlinks, args: \"url\": \"<url>\"\n8. get_text_summary: Get text summary,
+      args: \"url\": \"<url>\", \"question\": \"<question>\"\n9. list_agents: List
+      GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\":
+      \"<key>\", \"message\": \"<message>\"\n11. start_agent: Start GPT Agent, args:
+      \"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n12.
+      task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
+      Internet access for searches and information gathering.\n2. Long Term memory
+      management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
+      output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
+      actions to ensure you are performing to the best of your abilities.\n2. Constructively
+      self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
+      and strategies to refine your approach.\n4. Every command has a cost, so be
+      smart and efficient. Aim to complete tasks in the least number of steps.\n5.
+      Write all code to a file.\n\nYou should only respond in JSON format as described
+      below \nResponse Format: \n{\n  \"thoughts\": {\n  \"text\": \"thought\",\n  \"reasoning\":
+      \"reasoning\",\n  \"plan\": \"- short bulleted\\n- list that conveys\\n-
+      long-term plan\",\n  \"criticism\": \"constructive self-criticism\",\n  \"speak\":
+      \"thoughts summary to say to user\"\n  },\n  \"command\": {\n  \"name\":
+      \"command name\",\n  \"args\": {\n  \"arg name\": \"value\"\n  }\n  }\n}
+      \nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
+      "content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
+      "system", "content": "This reminds you of these events from your past: \nI was
+      created"}, {"role": "user", "content": "Determine which next command to use,
+      and respond using the format specified above:"}, {"role": "assistant", "content":
+      "{\n  \"thoughts\": {\n  \"text\": \"I should start by reading the instructions_1.txt
+      file using the read_file command.\",\n  \"reasoning\": \"The first step
+      is to understand the instructions and what is required of me.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_1.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will start by reading the instructions file.\"\n  },\n  \"command\":
+      {\n  \"name\": \"read_file\",\n  \"args\": {\n  \"filename\":
+      \"instructions_1.txt\"\n  }\n  }\n}"}, {"role": "system", "content":
+      "Command read_file returned: This task_id is 2314\nRead the file instructions_2.txt"},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}, {"role": "assistant", "content": "{\n  \"thoughts\":
+      {\n  \"text\": \"I need to read the instructions_2.txt file using the
+      read_file command.\",\n  \"reasoning\": \"The instructions_2.txt file
+      contains the next set of instructions that I need to follow.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_2.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will read the instructions_2.txt file now.\"\n  },\n  \"command\": {\n  \"name\":
+      \"read_file\",\n  \"args\": {\n  \"filename\": \"instructions_2.txt\"\n  }\n  }\n}"},
+      {"role": "system", "content": "Command read_file returned: Read the file instructions_3.txt"},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}, {"role": "assistant", "content": "{\n  \"thoughts\":
+      {\n  \"text\": \"I need to read the instructions_3.txt file using the
+      read_file command.\",\n  \"reasoning\": \"The instructions_3.txt file
+      contains the next set of instructions that I need to follow.\",\n  \"plan\":
+      \"- Use the read_file command to read the instructions_3.txt file\\n- Analyze
+      the instructions and determine the next steps\",\n  \"criticism\": \"I
+      need to ensure that I understand the instructions fully before proceeding.\",\n  \"speak\":
+      \"I will read the instructions_3.txt file now.\"\n  },\n  \"command\": {\n  \"name\":
+      \"read_file\",\n  \"args\": {\n  \"filename\": \"instructions_3.txt\"\n  }\n  }\n}"},
+      {"role": "system", "content": "Command read_file returned: Write the task_id
+      into the file output.txt\nShutdown"}, {"role": "user", "content": "Determine
+      which next command to use, and respond using the format specified above:"},
+      {"role": "assistant", "content": "{\n  \"thoughts\": {\n  \"text\":
+      \"I need to write the task_id into the output.txt file using the write_to_file
+      command.\",\n  \"reasoning\": \"The instructions require me to write the
+      task_id into the output.txt file, so I need to use the write_to_file command
+      to do that.\",\n  \"plan\": \"- Use the write_to_file command to write
+      the task_id into the output.txt file\",\n  \"criticism\": \"I need to
+      ensure that I write the correct task_id into the output.txt file.\",\n  \"speak\":
+      \"I will write the task_id into the output.txt file now.\"\n  },\n  \"command\":
+      {\n  \"name\": \"write_to_file\",\n  \"args\": {\n  \"filename\":
+      \"output.txt\",\n  \"text\": \"2314\"\n  }\n  }\n}"}, {"role":
+      "system", "content": "Command write_to_file returned: File written to successfully."},
+      {"role": "user", "content": "Determine which next command to use, and respond
+      using the format specified above:"}], "temperature": 0, "max_tokens": 0}'
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '7362'
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
method: POST
|
||||||
|
uri: https://api.openai.com/v1/chat/completions
|
||||||
|
response:
|
||||||
|
body:
|
||||||
|
string: !!binary |
|
||||||
|
H4sIAAAAAAAAA3SRS2/bMBCE7/0VxJ4pw684jm4FiiIpgp5aoEUVGDS1lhjzoXJXsQ1D/73Qw7Wr
|
||||||
|
ttdd8puZnTOYHFLQpWLtKpvcf3rdfv84f36c7ec791hV+Pl5efx2evuwPEWQELavqHn4MdHBVRbZ
|
||||||
|
BA8SdETFmEM6W62XD4v79cNcggs5WkihqDhZTO4SruM2JNPFdAYSalIFQnqGKgZX8YbDHj1BOrtb
|
||||||
|
rSVc4dfFbCWBAyt7Ha3Wy0aCLoPRSJD+OINDuoBjsAgpKCJDrDy3NoNn9G2Ec+aFECIDLkNdlEwZ
|
||||||
|
pGIYDgs8cjvM4El4xFxwEFTWnIeDFzUZXwguUbCi/Wawi0IH55TPJxnIW1ZERcEbX/TALyUK44lj
|
||||||
|
rduIJCL+rE1E4fBWZUyprPI9IBFfCf8vfwsZMXQ0bLQhN46GnurYMhWLJ1Gqt47WYXOhrO3UBp95
|
||||||
|
J0tii7sQsZPi9h7/Mk0Vqv1F7GCsvR7Rh8Mkg/5xIy+FDBn+6sMrhz3nj8wjORWLcZW3FfSA922c
|
||||||
|
LsE1ItVaI9Gutvb021XnbDCY+QYaCTvjDZWbHgcpEIcKJBif4xHSafPSvPsFAAD//wMAvNN1YlgD
|
||||||
|
AAA=
|
||||||
|
headers:
|
||||||
|
CF-Cache-Status:
|
||||||
|
- DYNAMIC
|
||||||
|
CF-RAY:
|
||||||
|
- 7cd2ebc78d28e9a4-DFW
|
||||||
|
Cache-Control:
|
||||||
|
- no-cache, must-revalidate
|
||||||
|
Connection:
|
||||||
|
- keep-alive
|
||||||
|
Content-Encoding:
|
||||||
|
- gzip
|
||||||
|
Content-Type:
|
||||||
|
- application/json
|
||||||
|
Date:
|
||||||
|
- Wed, 24 May 2023 14:18:31 GMT
|
||||||
|
Server:
|
||||||
|
- cloudflare
|
||||||
|
access-control-allow-origin:
|
||||||
|
- '*'
|
||||||
|
alt-svc:
|
||||||
|
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
|
||||||
|
openai-model:
|
||||||
|
- gpt-3.5-turbo-0301
|
||||||
|
openai-organization:
|
||||||
|
- significant-gravitas
|
||||||
|
openai-processing-ms:
|
||||||
|
- '19007'
|
||||||
|
openai-version:
|
||||||
|
- '2020-10-01'
|
||||||
|
strict-transport-security:
|
||||||
|
- max-age=15724800; includeSubDomains
|
||||||
|
x-ratelimit-limit-requests:
|
||||||
|
- '3500'
|
||||||
|
x-ratelimit-limit-tokens:
|
||||||
|
- '90000'
|
||||||
|
x-ratelimit-remaining-requests:
|
||||||
|
- '3499'
|
||||||
|
x-ratelimit-remaining-tokens:
|
||||||
|
- '86473'
|
||||||
|
x-ratelimit-reset-requests:
|
||||||
|
- 17ms
|
||||||
|
x-ratelimit-reset-tokens:
|
||||||
|
- 2.35s
|
||||||
|
x-request-id:
|
||||||
|
- d281dd2fbae62c2925a71cdef320242e
|
||||||
|
status:
|
||||||
|
code: 200
|
||||||
|
message: OK
|
||||||
version: 1
|
version: 1
|
||||||
|
|||||||
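The cassette above is replayed during the test run instead of calling the OpenAI API. A minimal sketch of how such a recording is consumed, assuming the vcrpy library that pytest-vcr wraps; the cassette path below is a hypothetical placeholder, not a file shipped with the repo:

import requests
import vcr

# record_mode="none" means: never hit the network, only serve recorded responses.
with vcr.use_cassette("example.yaml", record_mode="none"):
    response = requests.post(
        "https://api.openai.com/v1/chat/completions",
        json={"model": "gpt-3.5-turbo", "messages": []},
    )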
@@ -2,6 +2,7 @@ import pytest
 
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file, write_to_file
+from autogpt.config import Config
 from tests.integration.challenges.utils import get_level_to_run, run_interaction_loop
 from tests.utils import requires_api_key
 
@@ -16,6 +17,7 @@ def test_memory_challenge_a(
     user_selected_level: int,
     patched_api_requestor: None,
     monkeypatch: pytest.MonkeyPatch,
+    config: Config,
 ) -> None:
     """
     The agent reads a file containing a task_id. Then, it reads a series of other files.
@@ -29,12 +31,12 @@ def test_memory_challenge_a(
     num_files = get_level_to_run(user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL)
 
     task_id = "2314"
-    create_instructions_files(memory_management_agent, num_files, task_id)
+    create_instructions_files(memory_management_agent, num_files, task_id, config)
 
     run_interaction_loop(monkeypatch, memory_management_agent, num_files + 2)
 
     file_path = str(memory_management_agent.workspace.get_path("output.txt"))
-    content = read_file(file_path)
+    content = read_file(file_path, config)
     assert task_id in content, f"Expected the file to contain {task_id}"
 
@@ -42,6 +44,7 @@ def create_instructions_files(
     memory_management_agent: Agent,
     num_files: int,
     task_id: str,
+    config: Config,
     base_filename: str = "instructions_",
 ) -> None:
     """
@@ -56,7 +59,7 @@ def create_instructions_files(
         content = generate_content(i, task_id, base_filename, num_files)
         file_name = f"{base_filename}{i}.txt"
         file_path = str(memory_management_agent.workspace.get_path(file_name))
-        write_to_file(file_path, content)
+        write_to_file(file_path, content, config)
 
 
 def generate_content(
@@ -2,6 +2,7 @@ import pytest
 
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file, write_to_file
+from autogpt.config import Config
 from tests.integration.challenges.utils import (
     generate_noise,
     get_level_to_run,
@@ -21,6 +22,7 @@ def test_memory_challenge_b(
     user_selected_level: int,
     patched_api_requestor: None,
     monkeypatch: pytest.MonkeyPatch,
+    config: Config,
 ) -> None:
     """
     The agent reads a series of files, each containing a task_id and noise. After reading 'n' files,
@@ -34,12 +36,12 @@ def test_memory_challenge_b(
         user_selected_level, LEVEL_CURRENTLY_BEATEN, MAX_LEVEL
     )
     task_ids = [str(i * 1111) for i in range(1, current_level + 1)]
-    create_instructions_files(memory_management_agent, current_level, task_ids)
+    create_instructions_files(memory_management_agent, current_level, task_ids, config)
 
     run_interaction_loop(monkeypatch, memory_management_agent, current_level + 2)
 
     file_path = str(memory_management_agent.workspace.get_path("output.txt"))
-    content = read_file(file_path)
+    content = read_file(file_path, config)
     for task_id in task_ids:
         assert task_id in content, f"Expected the file to contain {task_id}"
 
@@ -48,6 +50,7 @@ def create_instructions_files(
     memory_management_agent: Agent,
     level: int,
     task_ids: list,
+    config: Config,
     base_filename: str = "instructions_",
 ) -> None:
     """
@@ -63,7 +66,7 @@ def create_instructions_files(
         content = generate_content(i, task_ids, base_filename, level)
         file_name = f"{base_filename}{i}.txt"
         file_path = str(memory_management_agent.workspace.get_path(file_name))
-        write_to_file(file_path, content)
+        write_to_file(file_path, content, config)
 
 
 def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:
@@ -2,6 +2,7 @@
 
 from autogpt.agent import Agent
 from autogpt.commands.file_operations import read_file, write_to_file
+from autogpt.config import Config
 from tests.integration.challenges.utils import (
     generate_noise,
     get_level_to_run,
@@ -21,6 +22,7 @@ def test_memory_challenge_c(
     user_selected_level: int,
     patched_api_requestor: None,
     monkeypatch: pytest.MonkeyPatch,
+    config: Config,
 ) -> None:
     """
     Instead of reading task Ids from files as with the previous challenges, the agent now must remember
@@ -49,7 +51,7 @@ def test_memory_challenge_c(
 
     level_silly_phrases = silly_phrases[:current_level]
     create_instructions_files(
-        memory_management_agent, current_level, level_silly_phrases
+        memory_management_agent, current_level, level_silly_phrases, config=config
     )
 
     run_interaction_loop(monkeypatch, memory_management_agent, current_level + 2)
@@ -64,6 +66,7 @@ def create_instructions_files(
     memory_management_agent: Agent,
     level: int,
     task_ids: list,
+    config: Config,
     base_filename: str = "instructions_",
 ) -> None:
     """
@@ -79,7 +82,7 @@ def create_instructions_files(
         content = generate_content(i, task_ids, base_filename, level)
         file_name = f"{base_filename}{i}.txt"
         file_path = str(memory_management_agent.workspace.get_path(file_name))
-        write_to_file(file_path, content)
+        write_to_file(file_path, content, config)
 
 
 def generate_content(
@@ -10,7 +10,7 @@ from tests.utils import requires_api_key
 @pytest.mark.vcr
 @pytest.mark.integration_test
 @requires_api_key("OPENAI_API_KEY")
-def test_make_agent(patched_api_requestor) -> None:
+def test_make_agent(patched_api_requestor, config) -> None:
     """Test that an agent can be created"""
     # Use the mock agent manager to avoid creating a real agent
     with patch("openai.ChatCompletion.create") as mock:
@@ -20,9 +20,13 @@ def test_make_agent(patched_api_requestor) -> None:
         response.usage.prompt_tokens = 1
         response.usage.completion_tokens = 1
         mock.return_value = response
-        start_agent("Test Agent", "chat", "Hello, how are you?", "gpt-3.5-turbo")
-        agents = list_agents()
+        start_agent(
+            "Test Agent", "chat", "Hello, how are you?", config, "gpt-3.5-turbo"
+        )
+        agents = list_agents(config)
         assert "List of agents:\n0: chat" == agents
-        start_agent("Test Agent 2", "write", "Hello, how are you?", "gpt-3.5-turbo")
-        agents = list_agents()
+        start_agent(
+            "Test Agent 2", "write", "Hello, how are you?", config, "gpt-3.5-turbo"
+        )
+        agents = list_agents(config)
         assert "List of agents:\n0: chat\n1: write" == agents
@@ -29,22 +29,22 @@ def random_string():
     return "".join(random.choice(string.ascii_lowercase) for _ in range(10))
 
 
-def test_execute_python_file(python_test_file: str, random_string: str):
-    result = sut.execute_python_file(python_test_file)
-    assert result == f"Hello {random_string}!\n"
+def test_execute_python_file(python_test_file: str, random_string: str, config):
+    result: str = sut.execute_python_file(python_test_file, config)
+    assert result.replace("\r", "") == f"Hello {random_string}!\n"
 
 
-def test_execute_python_file_invalid():
+def test_execute_python_file_invalid(config):
     assert all(
-        s in sut.execute_python_file("not_python").lower()
+        s in sut.execute_python_file("not_python", config).lower()
         for s in ["error:", "invalid", ".py"]
     )
     assert all(
-        s in sut.execute_python_file("notexist.py").lower()
+        s in sut.execute_python_file("notexist.py", config).lower()
        for s in ["error:", "does not exist"]
     )
 
 
-def test_execute_shell(config_allow_execute, random_string):
-    result = sut.execute_shell(f"echo 'Hello {random_string}!'")
+def test_execute_shell(config_allow_execute, random_string, config):
+    result = sut.execute_shell(f"echo 'Hello {random_string}!'", config)
     assert f"Hello {random_string}!" in result
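The `result.replace("\r", "")` assertion above is the PR's Windows-compatibility fix: subprocess output on Windows uses CRLF line endings, so carriage returns are normalized away before comparing. A tiny self-contained illustration of the idea:

# On Windows, a subprocess may emit "Hello!\r\n"; stripping "\r" makes the
# assertion platform-independent without touching the expected string.
windows_output = "Hello!\r\n"
assert windows_output.replace("\r", "") == "Hello!\n"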
@@ -20,7 +20,7 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):
 
     expected_output = f"Cloned {url} to {clone_path}"
 
-    clone_result = clone_repository(url=url, clone_path=clone_path)
+    clone_result = clone_repository(url=url, clone_path=clone_path, config=config)
 
     assert clone_result == expected_output
     mock_clone_from.assert_called_once_with(
@@ -29,7 +29,7 @@ def test_clone_auto_gpt_repository(workspace, mock_clone_from, config):
     )
 
 
-def test_clone_repository_error(workspace, mock_clone_from):
+def test_clone_repository_error(workspace, mock_clone_from, config):
     url = "https://github.com/this-repository/does-not-exist.git"
     clone_path = str(workspace.get_path("does-not-exist"))
 
@@ -37,6 +37,6 @@ def test_clone_repository_error(workspace, mock_clone_from):
         "clone", "fatal: repository not found", ""
     )
 
-    result = clone_repository(url=url, clone_path=clone_path)
+    result = clone_repository(url=url, clone_path=clone_path, config=config)
 
     assert "Error: " in result
@@ -38,12 +38,14 @@ def test_safe_google_results_invalid_input():
         ("no results", 1, "[]", []),
     ],
 )
-def test_google_search(query, num_results, expected_output, return_value, mocker):
+def test_google_search(
+    query, num_results, expected_output, return_value, mocker, config
+):
     mock_ddg = mocker.Mock()
     mock_ddg.return_value = return_value
 
     mocker.patch("autogpt.commands.google_search.DDGS.text", mock_ddg)
-    actual_output = google_search(query, num_results=num_results)
+    actual_output = google_search(query, config, num_results=num_results)
     expected_output = safe_google_results(expected_output)
     assert actual_output == expected_output
 
@@ -77,10 +79,10 @@ def mock_googleapiclient(mocker):
     ],
 )
 def test_google_official_search(
-    query, num_results, expected_output, search_results, mock_googleapiclient
+    query, num_results, expected_output, search_results, mock_googleapiclient, config
 ):
     mock_googleapiclient.return_value = search_results
-    actual_output = google_official_search(query, num_results=num_results)
+    actual_output = google_official_search(query, config, num_results=num_results)
     assert actual_output == safe_google_results(expected_output)
 
 
@@ -111,6 +113,7 @@ def test_google_official_search_errors(
     mock_googleapiclient,
     http_code,
     error_msg,
+    config,
 ):
     class resp:
         def __init__(self, _status, _reason):
@@ -127,5 +130,5 @@ def test_google_official_search_errors(
     )
 
     mock_googleapiclient.side_effect = error
-    actual_output = google_official_search(query, num_results=num_results)
+    actual_output = google_official_search(query, config, num_results=num_results)
     assert actual_output == safe_google_results(expected_output)
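For orientation, a rough sketch of the call shape these tests now expect: google_search takes the Config positionally after the query, keeps num_results keyword-only, and serializes results to a JSON string (the repo's safe_google_results). The body below is illustrative only, assuming the DDGS text API of duckduckgo-search 3.0.2 as pinned in this PR, not the repo's exact implementation:

import json

from duckduckgo_search import DDGS  # pinned at 3.0.2 in this PR


def google_search(query: str, config, num_results: int = 8) -> str:
    """Return up to num_results DuckDuckGo hits as a JSON string."""
    results = []
    if query:
        # Assumed generator API of the pinned duckduckgo-search version.
        for hit in DDGS().text(query):
            results.append(hit)
            if len(results) >= num_results:
                break
    return json.dumps(results, ensure_ascii=False)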
@@ -1,7 +1,9 @@
 from autogpt.commands.command import command
 
 
-@command("function_based", "Function-based test command")
+@command(
+    "function_based", "Function-based test command", "(arg1: int, arg2: str) -> str"
+)
 def function_based(arg1: int, arg2: str) -> str:
     """A function-based test command that returns a string with the two arguments separated by a dash."""
     return f"{arg1} - {arg2}"
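The change above reflects the PR note that the signature is now mandatory in the command decorator. A minimal sketch of a decorator with that contract, under the assumption that it simply attaches a Command record to the function; the repo's real Command and CommandRegistry live in autogpt.commands.command and differ in detail:

from typing import Any, Callable


class Command:
    """A callable bundled with the metadata the prompt needs to advertise it."""

    def __init__(
        self, name: str, description: str, method: Callable[..., Any], signature: str
    ) -> None:
        self.name = name
        self.description = description
        self.method = method
        self.signature = signature  # required: no introspection fallback

    def __call__(self, *args: Any, **kwargs: Any) -> Any:
        return self.method(*args, **kwargs)


def command(name: str, description: str, signature: str) -> Callable:
    """Decorator form; note the signature argument is now mandatory."""

    def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
        func.command = Command(name, description, func, signature)
        return func

    return decorator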
@@ -3,6 +3,7 @@
 import pytest
 
 from autogpt.commands.analyze_code import analyze_code
+from autogpt.config import Config
 
 
 @pytest.fixture
@@ -15,46 +16,59 @@ class TestAnalyzeCode:
         # Positive Test
         mock_call_ai_function.return_value = ["Suggestion 1", "Suggestion 2"]
         code = "def example_function():\n    pass"
-        result = analyze_code(code)
+        config = Config()
+        result = analyze_code(code, config)
         assert result == ["Suggestion 1", "Suggestion 2"]
         mock_call_ai_function.assert_called_once_with(
             "def analyze_code(code: str) -> list[str]:",
             [code],
             "Analyzes the given code and returns a list of suggestions for improvements.",
+            config=config,
         )
 
-    def test_negative_analyze_code(self, mock_call_ai_function):
+    def test_negative_analyze_code(
+        self,
+        mock_call_ai_function,
+        config: Config,
+    ):
         # Negative Test
         mock_call_ai_function.return_value = []
         code = "def example_function():\n    pass"
-        result = analyze_code(code)
+        result = analyze_code(code, config)
         assert result == []
         mock_call_ai_function.assert_called_once_with(
             "def analyze_code(code: str) -> list[str]:",
             [code],
             "Analyzes the given code and returns a list of suggestions for improvements.",
+            config=config,
         )
 
-    def test_error_analyze_code(self, mock_call_ai_function):
+    def test_error_analyze_code(self, mock_call_ai_function, config: Config):
         # Error Test
         mock_call_ai_function.side_effect = Exception("Error occurred")
         code = "def example_function():\n    pass"
         with pytest.raises(Exception):
-            analyze_code(code)
+            result = analyze_code(code, config)
         mock_call_ai_function.assert_called_once_with(
             "def analyze_code(code: str) -> list[str]:",
             [code],
             "Analyzes the given code and returns a list of suggestions for improvements.",
+            config=config,
         )
 
-    def test_edge_analyze_code_empty_code(self, mock_call_ai_function):
+    def test_edge_analyze_code_empty_code(
+        self,
+        mock_call_ai_function,
+        config: Config,
+    ):
         # Edge Test
         mock_call_ai_function.return_value = ["Suggestion 1", "Suggestion 2"]
         code = ""
-        result = analyze_code(code)
+        result = analyze_code(code, config)
         assert result == ["Suggestion 1", "Suggestion 2"]
         mock_call_ai_function.assert_called_once_with(
             "def analyze_code(code: str) -> list[str]:",
             [code],
             "Analyzes the given code and returns a list of suggestions for improvements.",
+            config=config,
         )
@@ -8,12 +8,10 @@ import pytest
 
 from autogpt.commands.audio_text import read_audio
 
-patch_func1 = "autogpt.commands.audio_text.CFG"
-
 
 class TestReadAudio:
     @patch("requests.post")
-    def test_positive_read_audio(self, mock_post):
+    def test_positive_read_audio(self, mock_post, config):
         # Positive Test
         audio_data = b"test_audio_data"
         mock_response = MagicMock()
@@ -21,41 +19,39 @@ class TestReadAudio:
             {"text": "Hello, world!"}
         )
         mock_post.return_value = mock_response
-        with patch(patch_func1) as mock_cfg:
-            mock_cfg.huggingface_api_token = "testing-token"
-            result = read_audio(audio_data)
-            assert result == "The audio says: Hello, world!"
-            mock_post.assert_called_once_with(
-                f"https://api-inference.huggingface.co/models/{mock_cfg.huggingface_audio_to_text_model}",
-                headers={"Authorization": f"Bearer {mock_cfg.huggingface_api_token}"},
-                data=audio_data,
-            )
+        config.huggingface_api_token = "testing-token"
+        result = read_audio(audio_data, config)
+        assert result == "The audio says: Hello, world!"
+        mock_post.assert_called_once_with(
+            f"https://api-inference.huggingface.co/models/{config.huggingface_audio_to_text_model}",
+            headers={"Authorization": f"Bearer {config.huggingface_api_token}"},
+            data=audio_data,
+        )
 
     @patch("requests.post")
-    def test_negative_read_audio(self, mock_post):
+    def test_negative_read_audio(self, mock_post, config):
         # Negative Test
         audio_data = b"test_audio_data"
         mock_response = MagicMock()
         mock_response.content.decode.return_value = json.dumps({"text": ""})
         mock_post.return_value = mock_response
-        with patch(patch_func1) as mock_cfg:
-            mock_cfg.huggingface_api_token = "testing-token"
-            result = read_audio(audio_data)
-            assert result == "The audio says: "
-            mock_post.assert_called_once_with(
-                f"https://api-inference.huggingface.co/models/{mock_cfg.huggingface_audio_to_text_model}",
-                headers={"Authorization": f"Bearer {mock_cfg.huggingface_api_token}"},
-                data=audio_data,
-            )
+        config.huggingface_api_token = "testing-token"
+        result = read_audio(audio_data, config)
+        assert result == "The audio says: "
+        mock_post.assert_called_once_with(
+            f"https://api-inference.huggingface.co/models/{config.huggingface_audio_to_text_model}",
+            headers={"Authorization": f"Bearer {config.huggingface_api_token}"},
+            data=audio_data,
+        )
 
-    def test_error_read_audio(self):
+    def test_error_read_audio(self, config):
         # Error Test
-        with patch(patch_func1) as mock_cfg:
-            mock_cfg.huggingface_api_token = None
-            with pytest.raises(ValueError):
-                read_audio(b"test_audio_data")
+        config.huggingface_api_token = None
+        with pytest.raises(ValueError):
+            read_audio(b"test_audio_data", config)
 
-    def test_edge_read_audio_empty_audio(self):
+    def test_edge_read_audio_empty_audio(self, config):
         # Edge Test
         with pytest.raises(ValueError):
-            read_audio(b"")
+            read_audio(b"", config)
@@ -7,6 +7,7 @@ from unittest.mock import mock_open, patch
 import pytest
 
 from autogpt.commands.audio_text import read_audio_from_file
+from autogpt.config import Config
 
 
 @pytest.fixture
@@ -22,7 +23,7 @@ class TestReadAudioFromFile:
         m = mock_open(read_data=mock_file_data)
 
         with patch("builtins.open", m):
-            result = read_audio_from_file("test_audio.wav")
+            result = read_audio_from_file("test_audio.wav", Config())
             assert result == "This is a sample text."
             m.assert_called_once_with("test_audio.wav", "rb")
 
@@ -33,14 +34,14 @@ class TestReadAudioFromFile:
         m = mock_open(read_data=mock_file_data)
 
         with patch("builtins.open", m):
-            result = read_audio_from_file("test_audio.wav")
+            result = read_audio_from_file("test_audio.wav", Config())
             assert result != "Incorrect text."
             m.assert_called_once_with("test_audio.wav", "rb")
 
     def test_error_read_audio_from_file(self):
         # Error test
         with pytest.raises(FileNotFoundError):
-            read_audio_from_file("non_existent_file.wav")
+            read_audio_from_file("non_existent_file.wav", Config())
 
     def test_edge_empty_audio_file(self, mock_read_audio):
         # Edge test
@@ -49,6 +50,6 @@ class TestReadAudioFromFile:
         m = mock_open(read_data=mock_file_data)
 
         with patch("builtins.open", m):
-            result = read_audio_from_file("empty_audio.wav")
+            result = read_audio_from_file("empty_audio.wav", Config())
             assert result == ""
             m.assert_called_once_with("empty_audio.wav", "rb")
@@ -7,6 +7,8 @@ import pytest
 
 from autogpt.commands.command import Command, CommandRegistry
 
+SIGNATURE = "(arg1: int, arg2: str) -> str"
+
 
 class TestCommand:
     """Test cases for the Command class."""
@@ -23,6 +25,7 @@ class TestCommand:
             name="example",
             description="Example command",
             method=self.example_command_method,
+            signature=SIGNATURE,
         )
 
         assert cmd.name == "example"
@@ -47,19 +50,11 @@ class TestCommand:
             name="example",
             description="Example command",
             method=self.example_command_method,
+            signature=SIGNATURE,
         )
         with pytest.raises(TypeError):
             cmd(arg1="invalid", does_not_exist="test")
 
-    def test_command_default_signature(self):
-        """Test that the default signature is generated correctly."""
-        cmd = Command(
-            name="example",
-            description="Example command",
-            method=self.example_command_method,
-        )
-        assert cmd.signature == "(arg1: int, arg2: str) -> str"
-
     def test_command_custom_signature(self):
         custom_signature = "custom_arg1: int, custom_arg2: str"
         cmd = Command(
@@ -84,6 +79,7 @@ class TestCommandRegistry:
             name="example",
             description="Example command",
             method=self.example_command_method,
+            signature=SIGNATURE,
         )
 
         registry.register(cmd)
@@ -98,6 +94,7 @@ class TestCommandRegistry:
             name="example",
             description="Example command",
             method=self.example_command_method,
+            signature=SIGNATURE,
        )
 
         registry.register(cmd)
@@ -112,6 +109,7 @@ class TestCommandRegistry:
             name="example",
             description="Example command",
             method=self.example_command_method,
+            signature=SIGNATURE,
         )
 
         registry.register(cmd)
@@ -133,6 +131,7 @@ class TestCommandRegistry:
             name="example",
             description="Example command",
             method=self.example_command_method,
+            signature=SIGNATURE,
         )
 
         registry.register(cmd)
@@ -154,6 +153,7 @@ class TestCommandRegistry:
             name="example",
             description="Example command",
             method=self.example_command_method,
+            signature=SIGNATURE,
         )
 
         registry.register(cmd)
@@ -135,6 +135,7 @@ def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config):
     mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
 
     create_config(
+        config=config,
         continuous=False,
         continuous_limit=False,
         ai_settings_file="",
@@ -66,6 +66,7 @@ def test_sd_webui_negative_prompt(config, workspace, image_size):
     gen_image = functools.partial(
         generate_image_with_sd_webui,
         prompt="astronaut riding a horse",
+        config=config,
         size=image_size,
         extra={"seed": 123},
     )
@@ -101,7 +102,7 @@ def generate_and_validate(
     config.huggingface_image_model = hugging_face_image_model
     prompt = "astronaut riding a horse"
 
-    image_path = lst(generate_image(prompt, image_size, **kwargs))
+    image_path = lst(generate_image(prompt, config, image_size, **kwargs))
     assert image_path.exists()
     with Image.open(image_path) as img:
         assert img.size == (image_size, image_size)
@@ -146,7 +147,7 @@ def test_huggingface_fail_request_with_delay(
 
     with patch("time.sleep") as mock_sleep:
         # Verify request fails.
-        result = generate_image(prompt, image_size)
+        result = generate_image(prompt, config, image_size)
         assert result == "Error creating image."
 
         # Verify retry was called with delay if delay is in return_text
@@ -156,8 +157,7 @@ def test_huggingface_fail_request_with_delay(
         mock_sleep.assert_not_called()
 
 
-def test_huggingface_fail_request_with_delay(mocker):
-    config = Config()
+def test_huggingface_fail_request_with_delay(mocker, config):
    config.huggingface_api_token = "1"
 
     # Mock requests.post
@@ -172,7 +172,7 @@ def test_huggingface_fail_request_with_delay(mocker):
     config.image_provider = "huggingface"
     config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
 
-    result = generate_image("astronaut riding a horse", 512)
+    result = generate_image("astronaut riding a horse", config, 512)
 
     assert result == "Error creating image."
 
@@ -180,8 +180,7 @@ def test_huggingface_fail_request_with_delay(mocker):
     mock_sleep.assert_called_with(0)
 
 
-def test_huggingface_fail_request_no_delay(mocker):
-    config = Config()
+def test_huggingface_fail_request_no_delay(mocker, config):
     config.huggingface_api_token = "1"
 
     # Mock requests.post
@@ -198,7 +197,7 @@ def test_huggingface_fail_request_no_delay(mocker):
     config.image_provider = "huggingface"
     config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
 
-    result = generate_image("astronaut riding a horse", 512)
+    result = generate_image("astronaut riding a horse", config, 512)
 
     assert result == "Error creating image."
 
@@ -206,8 +205,7 @@ def test_huggingface_fail_request_no_delay(mocker):
     mock_sleep.assert_not_called()
 
 
-def test_huggingface_fail_request_bad_json(mocker):
-    config = Config()
+def test_huggingface_fail_request_bad_json(mocker, config):
     config.huggingface_api_token = "1"
 
     # Mock requests.post
@@ -222,7 +220,7 @@ def test_huggingface_fail_request_bad_json(mocker):
     config.image_provider = "huggingface"
     config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
 
-    result = generate_image("astronaut riding a horse", 512)
+    result = generate_image("astronaut riding a horse", config, 512)
 
     assert result == "Error creating image."
 
@@ -230,8 +228,7 @@ def test_huggingface_fail_request_bad_json(mocker):
     mock_sleep.assert_not_called()
 
 
-def test_huggingface_fail_request_bad_image(mocker):
-    config = Config()
+def test_huggingface_fail_request_bad_image(mocker, config):
     config.huggingface_api_token = "1"
 
     # Mock requests.post
@@ -241,13 +238,12 @@ def test_huggingface_fail_request_bad_image(mocker):
     config.image_provider = "huggingface"
     config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
 
-    result = generate_image("astronaut riding a horse", 512)
+    result = generate_image("astronaut riding a horse", config, 512)
 
     assert result == "Error creating image."
 
 
-def test_huggingface_fail_missing_api_token(mocker):
-    config = Config()
+def test_huggingface_fail_missing_api_token(mocker, config):
     config.image_provider = "huggingface"
     config.huggingface_image_model = "CompVis/stable-diffusion-v1-4"
 
@@ -256,4 +252,4 @@ def test_huggingface_fail_missing_api_token(mocker):
 
     # Verify request raises an error.
     with pytest.raises(ValueError):
-        generate_image("astronaut riding a horse", 512)
+        generate_image("astronaut riding a horse", config, 512)
@@ -43,14 +43,14 @@ class TestScrapeLinks:
     provided with a valid url that returns a webpage with hyperlinks.
     """
 
-    def test_valid_url_with_hyperlinks(self):
+    def test_valid_url_with_hyperlinks(self, config):
         url = "https://www.google.com"
-        result = scrape_links(url)
+        result = scrape_links(url, config=config)
         assert len(result) > 0
         assert isinstance(result, list)
         assert isinstance(result[0], str)
 
-    def test_valid_url(self, mocker):
+    def test_valid_url(self, mocker, config):
         """Test that the function returns correctly formatted hyperlinks when given a valid url."""
         # Mock the requests.get() function to return a response with sample HTML containing hyperlinks
         mock_response = mocker.Mock()
@@ -61,12 +61,12 @@ class TestScrapeLinks:
         mocker.patch("requests.Session.get", return_value=mock_response)
 
         # Call the function with a valid URL
-        result = scrape_links("https://www.example.com")
+        result = scrape_links("https://www.example.com", config)
 
         # Assert that the function returns correctly formatted hyperlinks
         assert result == ["Google (https://www.google.com)"]
 
-    def test_invalid_url(self, mocker):
+    def test_invalid_url(self, mocker, config):
         """Test that the function returns "error" when given an invalid url."""
         # Mock the requests.get() function to return an HTTP error response
         mock_response = mocker.Mock()
@@ -74,12 +74,12 @@ class TestScrapeLinks:
         mocker.patch("requests.Session.get", return_value=mock_response)
 
         # Call the function with an invalid URL
-        result = scrape_links("https://www.invalidurl.com")
+        result = scrape_links("https://www.invalidurl.com", config)
 
         # Assert that the function returns "error"
         assert "Error:" in result
 
-    def test_no_hyperlinks(self, mocker):
+    def test_no_hyperlinks(self, mocker, config):
         """Test that the function returns an empty list when the html contains no hyperlinks."""
         # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks
         mock_response = mocker.Mock()
@@ -88,12 +88,12 @@ class TestScrapeLinks:
         mocker.patch("requests.Session.get", return_value=mock_response)
 
         # Call the function with a URL containing no hyperlinks
-        result = scrape_links("https://www.example.com")
+        result = scrape_links("https://www.example.com", config)
 
         # Assert that the function returns an empty list
         assert result == []
 
-    def test_scrape_links_with_few_hyperlinks(self, mocker):
+    def test_scrape_links_with_few_hyperlinks(self, mocker, config):
         """Test that scrape_links() correctly extracts and formats hyperlinks from a sample HTML containing a few hyperlinks."""
         mock_response = mocker.Mock()
         mock_response.status_code = 200
@@ -109,7 +109,7 @@ class TestScrapeLinks:
         mocker.patch("requests.Session.get", return_value=mock_response)
 
         # Call the function being tested
-        result = scrape_links("https://www.example.com")
+        result = scrape_links("https://www.example.com", config)
 
         # Assert that the function returns a list of formatted hyperlinks
         assert isinstance(result, list)
@@ -42,7 +42,7 @@ Additional aspects:
 
 
 class TestScrapeText:
-    def test_scrape_text_with_valid_url(self, mocker):
+    def test_scrape_text_with_valid_url(self, mocker, config):
         """Tests that scrape_text() returns the expected text when given a valid URL."""
         # Mock the requests.get() method to return a response with expected text
         expected_text = "This is some sample text"
@@ -57,14 +57,14 @@ class TestScrapeText:
         # Call the function with a valid URL and assert that it returns the
         # expected text
         url = "http://www.example.com"
-        assert scrape_text(url) == expected_text
+        assert scrape_text(url, config) == expected_text
 
-    def test_invalid_url(self):
+    def test_invalid_url(self, config):
         """Tests that an error is raised when an invalid url is provided."""
         url = "invalidurl.com"
-        pytest.raises(ValueError, scrape_text, url)
+        pytest.raises(ValueError, scrape_text, url, config)
 
-    def test_unreachable_url(self, mocker):
+    def test_unreachable_url(self, mocker, config):
         """Test that scrape_text returns an error message when an invalid or unreachable url is provided."""
         # Mock the requests.get() method to raise an exception
         mocker.patch(
@@ -74,10 +74,10 @@ class TestScrapeText:
         # Call the function with an invalid URL and assert that it returns an error
         # message
         url = "http://thiswebsitedoesnotexist.net/"
-        error_message = scrape_text(url)
+        error_message = scrape_text(url, config)
         assert "Error:" in error_message
 
-    def test_no_text(self, mocker):
+    def test_no_text(self, mocker, config):
         """Test that scrape_text returns an empty string when the html page contains no text to be scraped."""
         # Mock the requests.get() method to return a response with no text
         mock_response = mocker.Mock()
@@ -87,20 +87,20 @@ class TestScrapeText:
 
         # Call the function with a valid URL and assert that it returns an empty string
         url = "http://www.example.com"
-        assert scrape_text(url) == ""
+        assert scrape_text(url, config) == ""
 
-    def test_http_error(self, mocker):
+    def test_http_error(self, mocker, config):
         """Test that scrape_text returns an error message when the response status code is an http error (>=400)."""
         # Mock the requests.get() method to return a response with a 404 status code
         mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404))
 
         # Call the function with a URL
-        result = scrape_text("https://www.example.com")
+        result = scrape_text("https://www.example.com", config)
 
         # Check that the function returns an error message
         assert result == "Error: HTTP 404 error"
 
-    def test_scrape_text_with_html_tags(self, mocker):
+    def test_scrape_text_with_html_tags(self, mocker, config):
         """Test that scrape_text() properly handles HTML tags."""
         # Create a mock response object with HTML containing tags
         html = "<html><body><p>This is <b>bold</b> text.</p></body></html>"
@@ -110,7 +110,7 @@ class TestScrapeText:
         mocker.patch("requests.Session.get", return_value=mock_response)
 
         # Call the function with a URL
-        result = scrape_text("https://www.example.com")
+        result = scrape_text("https://www.example.com", config)
 
         # Check that the function properly handles HTML tags
         assert result == "This is bold text."
@@ -55,11 +55,11 @@ def test_file(test_file_path: Path):


 @pytest.fixture()
-def test_file_with_content_path(test_file: TextIOWrapper, file_content):
+def test_file_with_content_path(test_file: TextIOWrapper, file_content, config):
     test_file.write(file_content)
     test_file.close()
     file_ops.log_operation(
-        "write", test_file.name, file_ops.text_checksum(file_content)
+        "write", test_file.name, config, file_ops.text_checksum(file_content)
     )
     return Path(test_file.name)

@@ -117,7 +117,7 @@ def test_file_operations_state(test_file: TextIOWrapper):
     assert file_ops.file_operations_state(test_file.name) == expected_state


-def test_is_duplicate_operation(config, mocker: MockerFixture):
+def test_is_duplicate_operation(config: Config, mocker: MockerFixture):
     # Prepare a fake state dictionary for the function to use
     state = {
         "path/to/file1.txt": "checksum1",
@@ -127,30 +127,42 @@ def test_is_duplicate_operation(config, mocker: MockerFixture):

     # Test cases with write operations
     assert (
-        file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum1")
+        file_ops.is_duplicate_operation(
+            "write", "path/to/file1.txt", config, "checksum1"
+        )
         is True
     )
     assert (
-        file_ops.is_duplicate_operation("write", "path/to/file1.txt", "checksum2")
+        file_ops.is_duplicate_operation(
+            "write", "path/to/file1.txt", config, "checksum2"
+        )
         is False
     )
     assert (
-        file_ops.is_duplicate_operation("write", "path/to/file3.txt", "checksum3")
+        file_ops.is_duplicate_operation(
+            "write", "path/to/file3.txt", config, "checksum3"
+        )
         is False
     )
     # Test cases with append operations
     assert (
-        file_ops.is_duplicate_operation("append", "path/to/file1.txt", "checksum1")
+        file_ops.is_duplicate_operation(
+            "append", "path/to/file1.txt", config, "checksum1"
+        )
         is False
     )
     # Test cases with delete operations
-    assert file_ops.is_duplicate_operation("delete", "path/to/file1.txt") is False
-    assert file_ops.is_duplicate_operation("delete", "path/to/file3.txt") is True
+    assert (
+        file_ops.is_duplicate_operation("delete", "path/to/file1.txt", config) is False
+    )
+    assert (
+        file_ops.is_duplicate_operation("delete", "path/to/file3.txt", config) is True
+    )


 # Test logging a file operation
 def test_log_operation(config: Config):
-    file_ops.log_operation("log_test", "path/to/test")
+    file_ops.log_operation("log_test", "path/to/test", config)
     with open(config.file_logger_path, "r", encoding="utf-8") as f:
         content = f.read()
     assert f"log_test: path/to/test\n" in content
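As the hunks above show, `config` becomes the third positional argument of both `log_operation` and `is_duplicate_operation`, sitting just before the optional checksum. A usage sketch consistent with those calls (the file name and checksum are illustrative, and constructing Config directly stands in for the fixture):

from autogpt.commands import file_operations as file_ops
from autogpt.config import Config

config = Config()  # in the tests this instance arrives via the pytest fixture

# Record an operation in the file-operations log.
file_ops.log_operation("write", "notes.txt", config, checksum="abc123")

# Duplicate checks for write/append take a checksum; delete checks do not.
file_ops.is_duplicate_operation("write", "notes.txt", config, "abc123")
file_ops.is_duplicate_operation("delete", "notes.txt", config)
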
@@ -164,7 +176,7 @@ def test_text_checksum(file_content: str):


 def test_log_operation_with_checksum(config: Config):
-    file_ops.log_operation("log_test", "path/to/test", checksum="ABCDEF")
+    file_ops.log_operation("log_test", "path/to/test", config, checksum="ABCDEF")
     with open(config.file_logger_path, "r", encoding="utf-8") as f:
         content = f.read()
     assert f"log_test: path/to/test #ABCDEF\n" in content
@@ -211,50 +223,56 @@ def test_read_file(
     mock_MemoryItem_from_text,
     test_file_with_content_path: Path,
     file_content,
+    config: Config,
 ):
-    content = file_ops.read_file(test_file_with_content_path)
-    assert content == file_content
+    content = file_ops.read_file(test_file_with_content_path, config)
+    assert content.replace("\r", "") == file_content


-def test_write_to_file(test_file_path: Path):
+def test_write_to_file(test_file_path: Path, config):
     new_content = "This is new content.\n"
-    file_ops.write_to_file(str(test_file_path), new_content)
+    file_ops.write_to_file(str(test_file_path), new_content, config)
     with open(test_file_path, "r", encoding="utf-8") as f:
         content = f.read()
     assert content == new_content


-def test_write_file_logs_checksum(config: Config, test_file_path: Path):
+def test_write_file_logs_checksum(test_file_path: Path, config):
     new_content = "This is new content.\n"
     new_checksum = file_ops.text_checksum(new_content)
-    file_ops.write_to_file(str(test_file_path), new_content)
+    file_ops.write_to_file(str(test_file_path), new_content, config)
     with open(config.file_logger_path, "r", encoding="utf-8") as f:
         log_entry = f.read()
     assert log_entry == f"write: {test_file_path} #{new_checksum}\n"


-def test_write_file_fails_if_content_exists(test_file_path: Path):
+def test_write_file_fails_if_content_exists(test_file_path: Path, config):
     new_content = "This is new content.\n"
     file_ops.log_operation(
         "write",
         str(test_file_path),
+        config,
         checksum=file_ops.text_checksum(new_content),
     )
-    result = file_ops.write_to_file(str(test_file_path), new_content)
+    result = file_ops.write_to_file(str(test_file_path), new_content, config)
     assert result == "Error: File has already been updated."


-def test_write_file_succeeds_if_content_different(test_file_with_content_path: Path):
+def test_write_file_succeeds_if_content_different(
+    test_file_with_content_path: Path, config
+):
     new_content = "This is different content.\n"
-    result = file_ops.write_to_file(str(test_file_with_content_path), new_content)
+    result = file_ops.write_to_file(
+        str(test_file_with_content_path), new_content, config
+    )
     assert result == "File written to successfully."


-def test_append_to_file(test_nested_file: Path):
+def test_append_to_file(test_nested_file: Path, config):
     append_text = "This is appended text.\n"
-    file_ops.write_to_file(test_nested_file, append_text)
+    file_ops.write_to_file(test_nested_file, append_text, config)

-    file_ops.append_to_file(test_nested_file, append_text)
+    file_ops.append_to_file(test_nested_file, append_text, config)

     with open(test_nested_file, "r") as f:
         content_after = f.read()
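One behavioural detail in the hunk above: `test_read_file` now asserts on `content.replace("\r", "")` rather than raw `content`, so the comparison passes whether the file on disk carries Unix (`\n`) or Windows (`\r\n`) line endings, in line with the Windows-compatibility fix in this commit. The normalisation in isolation:

# Stripping carriage returns makes the comparison line-ending agnostic.
windows_text = "line one\r\nline two\r\n"
unix_text = "line one\nline two\n"
assert windows_text.replace("\r", "") == unix_text
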
@@ -262,12 +280,10 @@ def test_append_to_file(test_nested_file: Path):
     assert content_after == append_text + append_text


-def test_append_to_file_uses_checksum_from_appended_file(
-    config: Config, test_file_path: Path
-):
+def test_append_to_file_uses_checksum_from_appended_file(test_file_path: Path, config):
     append_text = "This is appended text.\n"
-    file_ops.append_to_file(test_file_path, append_text)
-    file_ops.append_to_file(test_file_path, append_text)
+    file_ops.append_to_file(test_file_path, append_text, config)
+    file_ops.append_to_file(test_file_path, append_text, config)
     with open(config.file_logger_path, "r", encoding="utf-8") as f:
         log_contents = f.read()

@@ -282,8 +298,8 @@ def test_append_to_file_uses_checksum_from_appended_file(
     )


-def test_delete_file(test_file_with_content_path: Path):
-    result = file_ops.delete_file(str(test_file_with_content_path))
+def test_delete_file(test_file_with_content_path: Path, config):
+    result = file_ops.delete_file(str(test_file_with_content_path), config)
     assert result == "File deleted successfully."
     assert os.path.exists(test_file_with_content_path) is False

@@ -291,16 +307,16 @@ def test_delete_file(test_file_with_content_path: Path):
 def test_delete_missing_file(config):
     filename = "path/to/file/which/does/not/exist"
     # confuse the log
-    file_ops.log_operation("write", filename, checksum="fake")
+    file_ops.log_operation("write", filename, config, checksum="fake")
     try:
         os.remove(filename)
     except FileNotFoundError as err:
-        assert str(err) in file_ops.delete_file(filename)
+        assert str(err) in file_ops.delete_file(filename, config)
         return
     assert False, f"Failed to test delete_file; {filename} not expected to exist"


-def test_list_files(workspace: Workspace, test_directory: Path):
+def test_list_files(workspace: Workspace, test_directory: Path, config):
     # Case 1: Create files A and B, search for A, and ensure we don't return A and B
     file_a = workspace.get_path("file_a.txt")
     file_b = workspace.get_path("file_b.txt")
@@ -318,7 +334,7 @@ def test_list_files(workspace: Workspace, test_directory: Path):
     with open(os.path.join(test_directory, file_a.name), "w") as f:
         f.write("This is file A in the subdirectory.")

-    files = file_ops.list_files(str(workspace.root))
+    files = file_ops.list_files(str(workspace.root), config)
     assert file_a.name in files
     assert file_b.name in files
     assert os.path.join(Path(test_directory).name, file_a.name) in files
@@ -331,17 +347,17 @@ def test_list_files(workspace: Workspace, test_directory: Path):

     # Case 2: Search for a file that does not exist and make sure we don't throw
     non_existent_file = "non_existent_file.txt"
-    files = file_ops.list_files("")
+    files = file_ops.list_files("", config)
     assert non_existent_file not in files


-def test_download_file(config, workspace: Workspace):
+def test_download_file(workspace: Workspace, config):
     url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz"
     local_name = workspace.get_path("auto-gpt.tar.gz")
     size = 365023
     readable_size = readable_file_size(size)
     assert (
-        file_ops.download_file(url, local_name)
+        file_ops.download_file(url, local_name, config)
         == f'Successfully downloaded and locally stored file: "{local_name}"! (Size: {readable_size})'
     )
     assert os.path.isfile(local_name) is True
@@ -349,10 +365,10 @@ def test_download_file(config, workspace: Workspace):

     url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.0.0.tar.gz"
     assert "Got an HTTP Error whilst trying to download file" in file_ops.download_file(
-        url, local_name
+        url, local_name, config
     )

     url = "https://thiswebsiteiswrong.hmm/v0.0.0.tar.gz"
     assert "Failed to establish a new connection:" in file_ops.download_file(
-        url, local_name
+        url, local_name, config
     )
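The download tests above also spell out the return contract of `download_file(url, local_name, config)`: a success message embedding the local path and a human-readable size, or an error string describing the HTTP or connection failure. A call sketch under the new signature (the local file name is illustrative, and constructing Config directly stands in for the fixture):

from autogpt.commands import file_operations as file_ops
from autogpt.config import Config

config = Config()  # assumed; the test receives this via the fixture
url = "https://github.com/Significant-Gravitas/Auto-GPT/archive/refs/tags/v0.2.2.tar.gz"
result = file_ops.download_file(url, "auto-gpt.tar.gz", config)
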
@@ -1,11 +1,11 @@
 from autogpt.commands.web_selenium import browse_website


-def test_browse_website():
+def test_browse_website(config):
     url = "https://barrel-roll.com"
     question = "How to execute a barrel roll"

-    response = browse_website(url, question)
+    response = browse_website(url, question, config)
     assert "Error" in response
     # Sanity check that the response is not too long
     assert len(response) < 200
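`browse_website` likewise gains `config` as a third positional argument. The test deliberately targets an unreachable domain and only exercises the error path, so here is a sketch of a live call under the new signature (URL and question are illustrative, and Config is constructed directly in place of the fixture):

from autogpt.commands.web_selenium import browse_website
from autogpt.config import Config

config = Config()  # assumed; the test receives this via the fixture
response = browse_website("https://example.com", "What is this page about?", config)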