From d923004e20f2eb2850af16876b2012f63452b225 Mon Sep 17 00:00:00 2001 From: merwanehamadi Date: Thu, 15 Jun 2023 15:04:51 -0700 Subject: [PATCH] Remove app commands, audio text and playwright (#4711) * Remove App Commands and Audio Text Signed-off-by: Merwane Hamadi * Remove self feedback Signed-off-by: Merwane Hamadi --------- Signed-off-by: Merwane Hamadi Co-authored-by: Erik Peterson --- autogpt/agent/agent.py | 67 +------------ autogpt/app.py | 125 +------------------------ autogpt/commands/audio_text.py | 71 -------------- autogpt/commands/web_playwright.py | 82 ---------------- autogpt/commands/web_requests.py | 104 -------------------- autogpt/llm/chat.py | 6 +- autogpt/main.py | 1 - tests/unit/test_browse_scrape_links.py | 119 ----------------------- tests/unit/test_browse_scrape_text.py | 117 ----------------------- tests/unit/test_get_self_feedback.py | 62 ------------ tests/unit/test_make_agent.py | 24 ----- 11 files changed, 5 insertions(+), 773 deletions(-) delete mode 100644 autogpt/commands/audio_text.py delete mode 100644 autogpt/commands/web_playwright.py delete mode 100644 autogpt/commands/web_requests.py delete mode 100644 tests/unit/test_browse_scrape_links.py delete mode 100644 tests/unit/test_browse_scrape_text.py delete mode 100644 tests/unit/test_get_self_feedback.py delete mode 100644 tests/unit/test_make_agent.py diff --git a/autogpt/agent/agent.py b/autogpt/agent/agent.py index 2fed0d4b..202c124a 100644 --- a/autogpt/agent/agent.py +++ b/autogpt/agent/agent.py @@ -8,15 +8,12 @@ from colorama import Fore, Style from autogpt.config import Config from autogpt.config.ai_config import AIConfig from autogpt.json_utils.utilities import extract_json_from_response, validate_json -from autogpt.llm.base import ChatSequence -from autogpt.llm.chat import chat_with_ai, create_chat_completion +from autogpt.llm.chat import chat_with_ai from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS from autogpt.llm.utils import count_string_tokens from autogpt.log_cycle.log_cycle import ( FULL_MESSAGE_HISTORY_FILE_NAME, NEXT_ACTION_FILE_NAME, - PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME, - SUPERVISOR_FEEDBACK_FILE_NAME, USER_INPUT_FILE_NAME, LogCycleHandler, ) @@ -208,24 +205,6 @@ class Agent: if console_input.lower().strip() == self.config.authorise_key: user_input = "GENERATE NEXT COMMAND JSON" break - elif console_input.lower().strip() == "s": - logger.typewriter_log( - "-=-=-=-=-=-=-= THOUGHTS, REASONING, PLAN AND CRITICISM WILL NOW BE VERIFIED BY AGENT -=-=-=-=-=-=-=", - Fore.GREEN, - "", - ) - thoughts = assistant_reply_json.get("thoughts", {}) - self_feedback_resp = self.get_self_feedback( - thoughts, self.config.fast_llm_model - ) - logger.typewriter_log( - f"SELF FEEDBACK: {self_feedback_resp}", - Fore.YELLOW, - "", - ) - user_input = self_feedback_resp - command_name = "self_feedback" - break elif console_input.lower().strip() == "": logger.warn("Invalid input format.") continue @@ -281,8 +260,6 @@ class Agent: result = f"Could not execute command: {arguments}" elif command_name == "human_feedback": result = f"Human feedback: {user_input}" - elif command_name == "self_feedback": - result = f"Self feedback: {user_input}" else: for plugin in self.config.plugins: if not plugin.can_handle_pre_command(): @@ -336,45 +313,3 @@ class Agent: self.workspace.get_path(command_args[pathlike]) ) return command_args - - def get_self_feedback(self, thoughts: dict, llm_model: str) -> str: - """Generates a feedback response based on the provided thoughts dictionary. 
- This method takes in a dictionary of thoughts containing keys such as 'reasoning', - 'plan', 'thoughts', and 'criticism'. It combines these elements into a single - feedback message and uses the create_chat_completion() function to generate a - response based on the input message. - Args: - thoughts (dict): A dictionary containing thought elements like reasoning, - plan, thoughts, and criticism. - Returns: - str: A feedback response generated using the provided thoughts dictionary. - """ - ai_role = self.ai_config.ai_role - - feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process." - reasoning = thoughts.get("reasoning", "") - plan = thoughts.get("plan", "") - thought = thoughts.get("thoughts", "") - feedback_thoughts = thought + reasoning + plan - - prompt = ChatSequence.for_model(llm_model) - prompt.add("user", feedback_prompt + feedback_thoughts) - - self.log_cycle_handler.log_cycle( - self.ai_config.ai_name, - self.created_at, - self.cycle_count, - prompt.raw(), - PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME, - ) - - feedback = create_chat_completion(prompt) - - self.log_cycle_handler.log_cycle( - self.ai_config.ai_name, - self.created_at, - self.cycle_count, - feedback, - SUPERVISOR_FEEDBACK_FILE_NAME, - ) - return feedback diff --git a/autogpt/app.py b/autogpt/app.py index fee3413a..30f4b7d6 100644 --- a/autogpt/app.py +++ b/autogpt/app.py @@ -1,15 +1,9 @@ """ Command and Control """ import json -from typing import Dict, List, Union +from typing import Dict from autogpt.agent.agent import Agent -from autogpt.agent.agent_manager import AgentManager -from autogpt.command_decorator import command -from autogpt.commands.web_requests import scrape_links, scrape_text from autogpt.models.command_registry import CommandRegistry -from autogpt.processing.text import summarize_text -from autogpt.speech import say_text -from autogpt.url_utils.validators import validate_url def is_valid_int(value: str) -> bool: @@ -125,120 +119,3 @@ def execute_command( ) except Exception as e: return f"Error: {str(e)}" - - -@command( - "get_text_summary", "Get text summary", '"url": "", "question": ""' -) -@validate_url -def get_text_summary(url: str, question: str, agent: Agent) -> str: - """Get the text summary of a webpage - - Args: - url (str): The url to scrape - question (str): The question to summarize the text for - - Returns: - str: The summary of the text - """ - text = scrape_text(url, agent) - summary, _ = summarize_text(text, question=question) - - return f""" "Result" : {summary}""" - - -@command("get_hyperlinks", "Get hyperlinks", '"url": ""') -@validate_url -def get_hyperlinks(url: str, agent: Agent) -> Union[str, List[str]]: - """Get all hyperlinks on a webpage - - Args: - url (str): The url to scrape - - Returns: - str or list: The hyperlinks on the page - """ - return scrape_links(url, agent) - - -@command( - "start_agent", - "Start GPT Agent", - '"name": "", "task": "", "prompt": ""', -) -def start_agent(name: str, task: str, prompt: str, agent: Agent, model=None) -> str: - """Start an agent with a given name, task, and prompt - - Args: - name (str): The name of the agent - task (str): The task of 
the agent - prompt (str): The prompt for the agent - model (str): The model to use for the agent - - Returns: - str: The response of the agent - """ - agent_manager = AgentManager() - - # Remove underscores from name - voice_name = name.replace("_", " ") - - first_message = f"""You are {name}. Respond with: "Acknowledged".""" - agent_intro = f"{voice_name} here, Reporting for duty!" - - if model is None: - model = config.smart_llm_model - - # Create agent - if agent.config.speak_mode: - say_text(agent_intro, 1) - key, ack = agent_manager.create_agent(task, first_message, model) - - if agent.config.speak_mode: - say_text(f"Hello {voice_name}. Your task is as follows. {task}.") - - # Assign task (prompt), get response - agent_response = agent_manager.message_agent(key, prompt) - - return f"Agent {name} created with key {key}. First response: {agent_response}" - - -@command("message_agent", "Message GPT Agent", '"key": "", "message": ""') -def message_agent(key: str, message: str, agent: Agent) -> str: - """Message an agent with a given key and message""" - # Check if the key is a valid integer - if is_valid_int(key): - agent_response = AgentManager().message_agent(int(key), message) - else: - return "Invalid key, must be an integer." - - # Speak response - if agent.config.speak_mode: - say_text(agent_response, 1) - return agent_response - - -@command("list_agents", "List GPT Agents", "() -> str") -def list_agents(agent: Agent) -> str: - """List all agents - - Returns: - str: A list of all agents - """ - return "List of agents:\n" + "\n".join( - [str(x[0]) + ": " + x[1] for x in AgentManager().list_agents()] - ) - - -@command("delete_agent", "Delete GPT Agent", '"key": ""') -def delete_agent(key: str, agent: Agent) -> str: - """Delete an agent with a given key - - Args: - key (str): The key of the agent to delete - - Returns: - str: A message indicating whether the agent was deleted or not - """ - result = AgentManager().delete_agent(key) - return f"Agent {key} deleted." if result else f"Agent {key} does not exist." diff --git a/autogpt/commands/audio_text.py b/autogpt/commands/audio_text.py deleted file mode 100644 index e77e37cc..00000000 --- a/autogpt/commands/audio_text.py +++ /dev/null @@ -1,71 +0,0 @@ -"""Commands for converting audio to text.""" -import json - -import requests - -from autogpt.agent.agent import Agent -from autogpt.command_decorator import command - - -@command( - "read_audio_from_file", - "Convert Audio to text", - '"filename": ""', - lambda config: config.huggingface_audio_to_text_model - and config.huggingface_api_token, - "Configure huggingface_audio_to_text_model and Hugging Face api token.", -) -def read_audio_from_file(filename: str, agent: Agent) -> str: - """ - Convert audio to text. - - Args: - filename (str): The path to the audio file - - Returns: - str: The text from the audio - """ - with open(filename, "rb") as audio_file: - audio = audio_file.read() - return read_audio(audio, agent.config) - - -def read_audio(audio: bytes, agent: Agent) -> str: - """ - Convert audio to text. 
- - Args: - audio (bytes): The audio to convert - - Returns: - str: The text from the audio - """ - if agent.config.audio_to_text_provider == "huggingface": - text = read_huggingface_audio(audio, agent.config) - if text: - return f"The audio says: {text}" - else: - return f"Error, couldn't convert audio to text" - - return "Error: No audio to text provider given" - - -def read_huggingface_audio(audio: bytes, agent: Agent) -> str: - model = agent.config.huggingface_audio_to_text_model - api_url = f"https://api-inference.huggingface.co/models/{model}" - api_token = agent.config.huggingface_api_token - headers = {"Authorization": f"Bearer {api_token}"} - - if api_token is None: - raise ValueError( - "You need to set your Hugging Face API token in the config file." - ) - - response = requests.post( - api_url, - headers=headers, - data=audio, - ) - - response_json = json.loads(response.content.decode("utf-8")) - return response_json.get("text") diff --git a/autogpt/commands/web_playwright.py b/autogpt/commands/web_playwright.py deleted file mode 100644 index 70f19dee..00000000 --- a/autogpt/commands/web_playwright.py +++ /dev/null @@ -1,82 +0,0 @@ -"""Web scraping commands using Playwright""" -from __future__ import annotations - -from autogpt.logs import logger - -try: - from playwright.sync_api import sync_playwright -except ImportError: - logger.info( - "Playwright not installed. Please install it with 'pip install playwright' to use." - ) -from bs4 import BeautifulSoup - -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks - - -def scrape_text(url: str) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - - except Exception as e: - text = f"Error: {str(e)}" - - finally: - browser.close() - - return text - - -def scrape_links(url: str) -> str | list[str]: - """Scrape links from a webpage - - Args: - url (str): The URL to scrape links from - - Returns: - Union[str, List[str]]: The scraped links - """ - with sync_playwright() as p: - browser = p.chromium.launch() - page = browser.new_page() - - try: - page.goto(url) - html_content = page.content() - soup = BeautifulSoup(html_content, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - hyperlinks = extract_hyperlinks(soup, url) - formatted_links = format_hyperlinks(hyperlinks) - - except Exception as e: - formatted_links = f"Error: {str(e)}" - - finally: - browser.close() - - return formatted_links diff --git a/autogpt/commands/web_requests.py b/autogpt/commands/web_requests.py deleted file mode 100644 index 765c3778..00000000 --- a/autogpt/commands/web_requests.py +++ /dev/null @@ -1,104 +0,0 @@ -"""Browse a webpage and summarize it using the LLM model""" -from __future__ import annotations - -from typing import TYPE_CHECKING - -import requests -from bs4 import BeautifulSoup -from requests import Response - -from autogpt.processing.html import extract_hyperlinks, format_hyperlinks -from autogpt.url_utils.validators import validate_url - 
-session = requests.Session() - -if TYPE_CHECKING: - from autogpt.agent.agent import Agent - - -@validate_url -def get_response( - url: str, agent: Agent, timeout: int = 10 -) -> tuple[None, str] | tuple[Response, None]: - """Get the response from a URL - - Args: - url (str): The URL to get the response from - timeout (int): The timeout for the HTTP request - - Returns: - tuple[None, str] | tuple[Response, None]: The response and error message - - Raises: - ValueError: If the URL is invalid - requests.exceptions.RequestException: If the HTTP request fails - """ - try: - session.headers.update({"User-Agent": agent.config.user_agent}) - response = session.get(url, timeout=timeout) - - # Check if the response contains an HTTP error - if response.status_code >= 400: - return None, f"Error: HTTP {str(response.status_code)} error" - - return response, None - except ValueError as ve: - # Handle invalid URL format - return None, f"Error: {str(ve)}" - - except requests.exceptions.RequestException as re: - # Handle exceptions related to the HTTP request - # (e.g., connection errors, timeouts, etc.) - return None, f"Error: {str(re)}" - - -def scrape_text(url: str, agent: Agent) -> str: - """Scrape text from a webpage - - Args: - url (str): The URL to scrape text from - - Returns: - str: The scraped text - """ - response, error_message = get_response(url, agent) - if error_message: - return error_message - if not response: - return "Error: Could not get response" - - soup = BeautifulSoup(response.text, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - text = soup.get_text() - lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) - text = "\n".join(chunk for chunk in chunks if chunk) - - return text - - -def scrape_links(url: str, agent: Agent) -> str | list[str]: - """Scrape links from a webpage - - Args: - url (str): The URL to scrape links from - - Returns: - str | list[str]: The scraped links - """ - response, error_message = get_response(url, agent) - if error_message: - return error_message - if not response: - return "Error: Could not get response" - soup = BeautifulSoup(response.text, "html.parser") - - for script in soup(["script", "style"]): - script.extract() - - hyperlinks = extract_hyperlinks(soup, url) - - return format_hyperlinks(hyperlinks) diff --git a/autogpt/llm/chat.py b/autogpt/llm/chat.py index 9ed07cb2..41d1b78b 100644 --- a/autogpt/llm/chat.py +++ b/autogpt/llm/chat.py @@ -19,7 +19,7 @@ def chat_with_ai( config: Config, agent: Agent, system_prompt: str, - user_input: str, + triggering_prompt: str, token_limit: int, model: str | None = None, ): @@ -31,7 +31,7 @@ def chat_with_ai( config (Config): The config to use. agent (Agent): The agent to use. system_prompt (str): The prompt explaining the rules to the AI. - user_input (str): The input from the user. + triggering_prompt (str): The input from the user. token_limit (int): The maximum number of tokens allowed in the API call. model (str, optional): The model to use. If None, the config.fast_llm_model will be used. Defaults to None. 
@@ -90,7 +90,7 @@ def chat_with_ai( # ) # Account for user input (appended later) - user_input_msg = Message("user", user_input) + user_input_msg = Message("user", triggering_prompt) current_tokens_used += count_message_tokens([user_input_msg], model) current_tokens_used += 500 # Reserve space for new_summary_message diff --git a/autogpt/main.py b/autogpt/main.py index a6238457..3b980ab2 100644 --- a/autogpt/main.py +++ b/autogpt/main.py @@ -23,7 +23,6 @@ from autogpt.workspace import Workspace from scripts.install_plugin_deps import install_plugin_dependencies COMMAND_CATEGORIES = [ - "autogpt.commands.audio_text", "autogpt.commands.execute_code", "autogpt.commands.file_operations", "autogpt.commands.git_operations", diff --git a/tests/unit/test_browse_scrape_links.py b/tests/unit/test_browse_scrape_links.py deleted file mode 100644 index 5975e086..00000000 --- a/tests/unit/test_browse_scrape_links.py +++ /dev/null @@ -1,119 +0,0 @@ -# Generated by CodiumAI - -# Dependencies: -# pip install pytest-mock - -from autogpt.agent.agent import Agent -from autogpt.commands.web_requests import scrape_links - -""" -Code Analysis - -Objective: -The objective of the 'scrape_links' function is to scrape hyperlinks from a -given URL and return them in a formatted way. - -Inputs: -- url: a string representing the URL to be scraped. - -Flow: -1. Send a GET request to the given URL using the requests library and the user agent header from the config file. -2. Check if the response contains an HTTP error. If it does, return "error". -3. Parse the HTML content of the response using the BeautifulSoup library. -4. Remove any script and style tags from the parsed HTML. -5. Extract all hyperlinks from the parsed HTML using the 'extract_hyperlinks' function. -6. Format the extracted hyperlinks using the 'format_hyperlinks' function. -7. Return the formatted hyperlinks. - -Outputs: -- A list of formatted hyperlinks. - -Additional aspects: -- The function uses the 'requests' and 'BeautifulSoup' libraries to send HTTP -requests and parse HTML content, respectively. -- The 'extract_hyperlinks' function is called to extract hyperlinks from the parsed HTML. -- The 'format_hyperlinks' function is called to format the extracted hyperlinks. -- The function checks for HTTP errors and returns "error" if any are found. -""" - - -class TestScrapeLinks: - """ - Tests that the function returns a list of formatted hyperlinks when - provided with a valid url that returns a webpage with hyperlinks. 
- """ - - def test_valid_url_with_hyperlinks(self, agent: Agent): - url = "https://www.google.com" - result = scrape_links(url, agent=agent) - assert len(result) > 0 - assert isinstance(result, list) - assert isinstance(result[0], str) - - def test_valid_url(self, mocker, agent: Agent): - """Test that the function returns correctly formatted hyperlinks when given a valid url.""" - # Mock the requests.get() function to return a response with sample HTML containing hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = ( - "Google" - ) - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL - result = scrape_links("https://www.example.com", agent) - - # Assert that the function returns correctly formatted hyperlinks - assert result == ["Google (https://www.google.com)"] - - def test_invalid_url(self, mocker, agent: Agent): - """Test that the function returns "error" when given an invalid url.""" - # Mock the requests.get() function to return an HTTP error response - mock_response = mocker.Mock() - mock_response.status_code = 404 - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with an invalid URL - result = scrape_links("https://www.invalidurl.com", agent) - - # Assert that the function returns "error" - assert "Error:" in result - - def test_no_hyperlinks(self, mocker, agent: Agent): - """Test that the function returns an empty list when the html contains no hyperlinks.""" - # Mock the requests.get() function to return a response with sample HTML containing no hyperlinks - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "

<html><body><p>No hyperlinks here</p></body></html>
" - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL containing no hyperlinks - result = scrape_links("https://www.example.com", agent) - - # Assert that the function returns an empty list - assert result == [] - - def test_scrape_links_with_few_hyperlinks(self, mocker, agent: Agent): - """Test that scrape_links() correctly extracts and formats hyperlinks from a sample HTML containing a few hyperlinks.""" - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = """ - - - - - - - - """ - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function being tested - result = scrape_links("https://www.example.com", agent) - - # Assert that the function returns a list of formatted hyperlinks - assert isinstance(result, list) - assert len(result) == 3 - assert result[0] == "Google (https://www.google.com)" - assert result[1] == "GitHub (https://github.com)" - assert result[2] == "CodiumAI (https://www.codium.ai)" diff --git a/tests/unit/test_browse_scrape_text.py b/tests/unit/test_browse_scrape_text.py deleted file mode 100644 index 23a80c54..00000000 --- a/tests/unit/test_browse_scrape_text.py +++ /dev/null @@ -1,117 +0,0 @@ -# Generated by CodiumAI - -import pytest -import requests - -from autogpt.agent.agent import Agent -from autogpt.commands.web_requests import scrape_text - -""" -Code Analysis - -Objective: -The objective of the "scrape_text" function is to scrape the text content from -a given URL and return it as a string, after removing any unwanted HTML tags and - scripts. - -Inputs: -- url: a string representing the URL of the webpage to be scraped. - -Flow: -1. Send a GET request to the given URL using the requests library and the user agent - header from the config file. -2. Check if the response contains an HTTP error. If it does, return an error message. -3. Use BeautifulSoup to parse the HTML content of the response and extract all script - and style tags. -4. Get the text content of the remaining HTML using the get_text() method of - BeautifulSoup. -5. Split the text into lines and then into chunks, removing any extra whitespace. -6. Join the chunks into a single string with newline characters between them. -7. Return the cleaned text. - -Outputs: -- A string representing the cleaned text content of the webpage. - -Additional aspects: -- The function uses the requests library and BeautifulSoup to handle the HTTP request - and HTML parsing, respectively. -- The function removes script and style tags from the HTML to avoid including unwanted - content in the text output. -- The function uses a generator expression to split the text into lines and chunks, - which can improve performance for large amounts of text. -""" - - -class TestScrapeText: - def test_scrape_text_with_valid_url(self, mocker, agent: Agent): - """Tests that scrape_text() returns the expected text when given a valid URL.""" - # Mock the requests.get() method to return a response with expected text - expected_text = "This is some sample text" - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = ( - "

" - f"{expected_text}

" - ) - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL and assert that it returns the - # expected text - url = "http://www.example.com" - assert scrape_text(url, agent) == expected_text - - def test_invalid_url(self, agent: Agent): - """Tests that an error is raised when an invalid url is provided.""" - url = "invalidurl.com" - pytest.raises(ValueError, scrape_text, url, agent) - - def test_unreachable_url(self, mocker, agent: Agent): - """Test that scrape_text returns an error message when an invalid or unreachable url is provided.""" - # Mock the requests.get() method to raise an exception - mocker.patch( - "requests.Session.get", side_effect=requests.exceptions.RequestException - ) - - # Call the function with an invalid URL and assert that it returns an error - # message - url = "http://thiswebsitedoesnotexist.net/" - error_message = scrape_text(url, agent) - assert "Error:" in error_message - - def test_no_text(self, mocker, agent: Agent): - """Test that scrape_text returns an empty string when the html page contains no text to be scraped.""" - # Mock the requests.get() method to return a response with no text - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = "" - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a valid URL and assert that it returns an empty string - url = "http://www.example.com" - assert scrape_text(url, agent) == "" - - def test_http_error(self, mocker, agent: Agent): - """Test that scrape_text returns an error message when the response status code is an http error (>=400).""" - # Mock the requests.get() method to return a response with a 404 status code - mocker.patch("requests.Session.get", return_value=mocker.Mock(status_code=404)) - - # Call the function with a URL - result = scrape_text("https://www.example.com", agent) - - # Check that the function returns an error message - assert result == "Error: HTTP 404 error" - - def test_scrape_text_with_html_tags(self, mocker, agent: Agent): - """Test that scrape_text() properly handles HTML tags.""" - # Create a mock response object with HTML containing tags - html = "

<html><body><p>This is <b>bold</b> text.</p></body></html>
" - mock_response = mocker.Mock() - mock_response.status_code = 200 - mock_response.text = html - mocker.patch("requests.Session.get", return_value=mock_response) - - # Call the function with a URL - result = scrape_text("https://www.example.com", agent) - - # Check that the function properly handles HTML tags - assert result == "This is bold text." diff --git a/tests/unit/test_get_self_feedback.py b/tests/unit/test_get_self_feedback.py deleted file mode 100644 index ba3e10fe..00000000 --- a/tests/unit/test_get_self_feedback.py +++ /dev/null @@ -1,62 +0,0 @@ -from datetime import datetime - -from pytest_mock import MockerFixture - -from autogpt.agent.agent import Agent -from autogpt.config import AIConfig -from autogpt.config.config import Config -from autogpt.llm.chat import create_chat_completion -from autogpt.log_cycle.log_cycle import LogCycleHandler - - -def test_get_self_feedback(config: Config, mocker: MockerFixture): - # Define a sample thoughts dictionary - thoughts = { - "reasoning": "Sample reasoning.", - "plan": "Sample plan.", - "thoughts": "Sample thoughts.", - } - - # Define a fake response for the create_chat_completion function - fake_response = ( - "The AI Agent has demonstrated a reasonable thought process, but there is room for improvement. " - "For example, the reasoning could be elaborated to better justify the plan, and the plan itself " - "could be more detailed to ensure its effectiveness. In addition, the AI Agent should focus more " - "on its core role and prioritize thoughts that align with that role." - ) - - # Mock the create_chat_completion function - mock_create_chat_completion = mocker.patch( - "autogpt.agent.agent.create_chat_completion", wraps=create_chat_completion - ) - mock_create_chat_completion.return_value = fake_response - - # Create a MagicMock object to replace the Agent instance - agent_mock = mocker.MagicMock(spec=Agent) - - # Mock the config attribute of the Agent instance - agent_mock.config = config - agent_mock.ai_config = AIConfig() - - # Mock the log_cycle_handler attribute of the Agent instance - agent_mock.log_cycle_handler = LogCycleHandler() - - # Mock the create_nested_directory method of the LogCycleHandler instance - agent_mock.created_at = datetime.now().strftime("%Y%m%d_%H%M%S") - - # Mock the cycle_count attribute of the Agent instance - agent_mock.cycle_count = 0 - - # Call the get_self_feedback method - feedback = Agent.get_self_feedback( - agent_mock, - thoughts, - "gpt-3.5-turbo", - ) - - # Check if the response is a non-empty string - assert isinstance(feedback, str) and len(feedback) > 0 - - # Check if certain keywords from input thoughts are present in the feedback response - for keyword in ["reasoning", "plan", "thoughts"]: - assert keyword in feedback diff --git a/tests/unit/test_make_agent.py b/tests/unit/test_make_agent.py deleted file mode 100644 index 9939d79c..00000000 --- a/tests/unit/test_make_agent.py +++ /dev/null @@ -1,24 +0,0 @@ -from unittest.mock import MagicMock - -from pytest_mock import MockerFixture - -from autogpt.agent.agent import Agent -from autogpt.app import list_agents, start_agent - - -def test_make_agent(agent: Agent, mocker: MockerFixture) -> None: - """Test that an agent can be created""" - mock = mocker.patch("openai.ChatCompletion.create") - - response = MagicMock() - del response.error - response.choices[0].messages[0].content = "Test message" - response.usage.prompt_tokens = 1 - response.usage.completion_tokens = 1 - mock.return_value = response - start_agent("Test Agent", "chat", "Hello, 
how are you?", agent, "gpt-3.5-turbo") - agents = list_agents(agent) - assert "List of agents:\n0: chat" == agents - start_agent("Test Agent 2", "write", "Hello, how are you?", agent, "gpt-3.5-turbo") - agents = list_agents(agent.config) - assert "List of agents:\n0: chat\n1: write" == agents