diff --git a/autogpt/commands/user_interaction.py b/autogpt/commands/user_interaction.py
index 484150fb..7de179f5 100644
--- a/autogpt/commands/user_interaction.py
+++ b/autogpt/commands/user_interaction.py
@@ -23,7 +23,7 @@ from autogpt.command_decorator import command
             "required": True,
         }
     },
-    enabled=lambda config: not config.noninteractive_mode
+    enabled=lambda config: not config.noninteractive_mode,
 )
 def ask_user(question: str, agent: Agent) -> str:
     resp = clean_input(agent.config, f"{agent.ai_config.ai_name} asks: '{question}': ")
diff --git a/mypy.ini b/mypy.ini
index 275cd260..0b189bc7 100644
--- a/mypy.ini
+++ b/mypy.ini
@@ -2,7 +2,9 @@
 follow_imports = skip
 check_untyped_defs = True
 disallow_untyped_defs = True
-files = tests/challenges/**/*.py
+
+[mypy-tests.*]
+disallow_untyped_defs = False
 
 [mypy-requests.*]
 ignore_missing_imports = True
diff --git a/tests/conftest.py b/tests/conftest.py
index 0631ee50..05d0f256 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -45,9 +45,7 @@ def temp_plugins_config_file():
 
 
 @pytest.fixture()
-def config(
-    temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace
-) -> Config:
+def config(temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace):
     config = ConfigBuilder.build_config_from_env(workspace.root.parent)
     if not os.environ.get("OPENAI_API_KEY"):
         os.environ["OPENAI_API_KEY"] = "sk-dummy"
diff --git a/tests/integration/test_provider_openai.py b/tests/integration/test_provider_openai.py
index f51ad9ac..7fa57ca9 100644
--- a/tests/integration/test_provider_openai.py
+++ b/tests/integration/test_provider_openai.py
@@ -14,41 +14,20 @@ def reset_api_manager():
     yield
 
 
-class TestProviderOpenAI:
-    @staticmethod
-    def test_create_chat_completion_debug_mode(caplog):
-        """Test if debug mode logs response."""
-        messages = [
-            {"role": "system", "content": "You are a helpful assistant."},
-            {"role": "user", "content": "Who won the world series in 2020?"},
-        ]
-        model = "gpt-3.5-turbo"
-        with patch("openai.ChatCompletion.create") as mock_create:
-            mock_response = MagicMock()
-            del mock_response.error
-            mock_response.usage.prompt_tokens = 10
-            mock_response.usage.completion_tokens = 20
-            mock_create.return_value = mock_response
+def test_create_chat_completion_empty_messages():
+    """Test if empty messages result in zero tokens and cost."""
+    messages = []
+    model = "gpt-3.5-turbo"
 
-            openai.create_chat_completion(messages, model=model)
+    with patch("openai.ChatCompletion.create") as mock_create:
+        mock_response = MagicMock()
+        del mock_response.error
+        mock_response.usage.prompt_tokens = 0
+        mock_response.usage.completion_tokens = 0
+        mock_create.return_value = mock_response
 
-            assert "Response" in caplog.text
+        openai.create_chat_completion(messages, model=model)
 
-    @staticmethod
-    def test_create_chat_completion_empty_messages():
-        """Test if empty messages result in zero tokens and cost."""
-        messages = []
-        model = "gpt-3.5-turbo"
-
-        with patch("openai.ChatCompletion.create") as mock_create:
-            mock_response = MagicMock()
-            del mock_response.error
-            mock_response.usage.prompt_tokens = 0
-            mock_response.usage.completion_tokens = 0
-            mock_create.return_value = mock_response
-
-            openai.create_chat_completion(messages, model=model)
-
-            assert api_manager.get_total_prompt_tokens() == 0
-            assert api_manager.get_total_completion_tokens() == 0
-            assert api_manager.get_total_cost() == 0
+    assert api_manager.get_total_prompt_tokens() == 0
+    assert api_manager.get_total_completion_tokens() == 0
+    assert api_manager.get_total_cost() == 0
diff --git a/tests/integration/test_web_selenium.py b/tests/integration/test_web_selenium.py
index 15dcefa9..a4b945af 100644
--- a/tests/integration/test_web_selenium.py
+++ b/tests/integration/test_web_selenium.py
@@ -7,10 +7,10 @@ from autogpt.commands.web_selenium import BrowsingError, browse_website
 @pytest.mark.vcr
 @pytest.mark.requires_openai_api_key
 def test_browse_website_nonexistent_url(agent: Agent, patched_api_requestor: None):
-    url = "https://barrel-roll.com"
+    url = "https://auto-gpt-thinks-this-website-does-not-exist.com"
     question = "How to execute a barrel roll"
 
-    with pytest.raises(BrowsingError, match=r"CONNECTION_CLOSED") as raised:
+    with pytest.raises(BrowsingError, match=r"NAME_NOT_RESOLVED") as raised:
         browse_website(url, question, agent)
 
     # Sanity check that the response is not too long
diff --git a/tests/unit/test_text_file_parsers.py b/tests/unit/test_text_file_parsers.py
index 08b47d33..ae2ad3ce 100644
--- a/tests/unit/test_text_file_parsers.py
+++ b/tests/unit/test_text_file_parsers.py
@@ -1,7 +1,7 @@
 import json
 import logging
 import tempfile
-from unittest import TestCase
+from pathlib import Path
 from xml.etree import ElementTree
 
 import docx
@@ -136,17 +136,18 @@ respective_file_creation_functions = {
     ".md": mock_md_file,
     ".tex": mock_latex_file,
 }
+binary_files_extensions = [".pdf", ".docx"]
 
 
-class TestConfig(TestCase):
-    def test_parsers(self):
-        binary_files_extensions = [".pdf", ".docx"]
-        for (
-            file_extension,
-            c_file_creator,
-        ) in respective_file_creation_functions.items():
-            created_filepath = c_file_creator()
-            loaded_text = read_textual_file(created_filepath, logger)
-            self.assertIn(plain_text_str, loaded_text)
-            should_be_binary = file_extension in binary_files_extensions
-            self.assertEqual(should_be_binary, is_file_binary_fn(created_filepath))
+def test_parsers():
+    for (
+        file_extension,
+        c_file_creator,
+    ) in respective_file_creation_functions.items():
+        created_file_path = Path(c_file_creator())
+        loaded_text = read_textual_file(created_file_path, logger)
+
+        assert plain_text_str in loaded_text
+
+        should_be_binary = file_extension in binary_files_extensions
+        assert should_be_binary == is_file_binary_fn(created_file_path)
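
The changes in `tests/integration/test_provider_openai.py` and `tests/unit/test_text_file_parsers.py` both apply the same unittest-to-pytest migration: drop the `TestCase` subclass, promote the test method to a module-level function, and replace `self.assert*` helpers with plain `assert` statements, which pytest rewrites to show the compared values on failure. A minimal, self-contained sketch of the pattern (the `TestLegacy` and `test_membership` names are illustrative, not from this codebase):

```python
from unittest import TestCase


# Before: unittest style -- assertions go through self.assert* helpers
# and the test must live on a TestCase subclass.
class TestLegacy(TestCase):
    def test_membership(self):
        self.assertIn("a", "abc")
        self.assertEqual(1 + 1, 2)


# After: pytest style -- a bare module-level function with plain asserts,
# discovered by name and rewritten by pytest for richer failure output.
def test_membership():
    assert "a" in "abc"
    assert 1 + 1 == 2
```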
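The `test_web_selenium.py` change relies on `pytest.raises(match=...)` treating its argument as a regular expression that is `re.search`'d against the string form of the raised exception, so `r"NAME_NOT_RESOLVED"` matches anywhere inside an error message such as Chrome's `net::ERR_NAME_NOT_RESOLVED`. A sketch of that behavior, using a stand-in `BrowsingError` and a made-up error string rather than real browser output:

```python
import pytest


class BrowsingError(Exception):
    """Stand-in for autogpt.commands.web_selenium.BrowsingError."""


def test_match_is_a_regex_search():
    # match= is re.search()'d against str(excinfo.value), so the pattern
    # only needs to occur somewhere in the message, not match it in full.
    with pytest.raises(BrowsingError, match=r"NAME_NOT_RESOLVED") as raised:
        raise BrowsingError("net::ERR_NAME_NOT_RESOLVED at https://example.com")
    assert "NAME_NOT_RESOLVED" in str(raised.value)
```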