mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2026-01-12 10:44:31 +01:00
Fix some more broken tests
This commit is contained in:
@@ -23,7 +23,7 @@ from autogpt.command_decorator import command
|
||||
"required": True,
|
||||
}
|
||||
},
|
||||
enabled=lambda config: not config.noninteractive_mode
|
||||
enabled=lambda config: not config.noninteractive_mode,
|
||||
)
|
||||
def ask_user(question: str, agent: Agent) -> str:
|
||||
resp = clean_input(agent.config, f"{agent.ai_config.ai_name} asks: '{question}': ")
|
||||
|
||||
4
mypy.ini
4
mypy.ini
@@ -2,7 +2,9 @@
|
||||
follow_imports = skip
|
||||
check_untyped_defs = True
|
||||
disallow_untyped_defs = True
|
||||
files = tests/challenges/**/*.py
|
||||
|
||||
[mypy-tests.*]
|
||||
disallow_untyped_defs = False
|
||||
|
||||
[mypy-requests.*]
|
||||
ignore_missing_imports = True
|
||||
|
||||
@@ -45,9 +45,7 @@ def temp_plugins_config_file():
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def config(
|
||||
temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace
|
||||
) -> Config:
|
||||
def config(temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace):
|
||||
config = ConfigBuilder.build_config_from_env(workspace.root.parent)
|
||||
if not os.environ.get("OPENAI_API_KEY"):
|
||||
os.environ["OPENAI_API_KEY"] = "sk-dummy"
|
||||
|
||||
@@ -14,41 +14,20 @@ def reset_api_manager():
|
||||
yield
|
||||
|
||||
|
||||
class TestProviderOpenAI:
|
||||
@staticmethod
|
||||
def test_create_chat_completion_debug_mode(caplog):
|
||||
"""Test if debug mode logs response."""
|
||||
messages = [
|
||||
{"role": "system", "content": "You are a helpful assistant."},
|
||||
{"role": "user", "content": "Who won the world series in 2020?"},
|
||||
]
|
||||
model = "gpt-3.5-turbo"
|
||||
with patch("openai.ChatCompletion.create") as mock_create:
|
||||
mock_response = MagicMock()
|
||||
del mock_response.error
|
||||
mock_response.usage.prompt_tokens = 10
|
||||
mock_response.usage.completion_tokens = 20
|
||||
mock_create.return_value = mock_response
|
||||
def test_create_chat_completion_empty_messages():
|
||||
"""Test if empty messages result in zero tokens and cost."""
|
||||
messages = []
|
||||
model = "gpt-3.5-turbo"
|
||||
|
||||
openai.create_chat_completion(messages, model=model)
|
||||
with patch("openai.ChatCompletion.create") as mock_create:
|
||||
mock_response = MagicMock()
|
||||
del mock_response.error
|
||||
mock_response.usage.prompt_tokens = 0
|
||||
mock_response.usage.completion_tokens = 0
|
||||
mock_create.return_value = mock_response
|
||||
|
||||
assert "Response" in caplog.text
|
||||
openai.create_chat_completion(messages, model=model)
|
||||
|
||||
@staticmethod
|
||||
def test_create_chat_completion_empty_messages():
|
||||
"""Test if empty messages result in zero tokens and cost."""
|
||||
messages = []
|
||||
model = "gpt-3.5-turbo"
|
||||
|
||||
with patch("openai.ChatCompletion.create") as mock_create:
|
||||
mock_response = MagicMock()
|
||||
del mock_response.error
|
||||
mock_response.usage.prompt_tokens = 0
|
||||
mock_response.usage.completion_tokens = 0
|
||||
mock_create.return_value = mock_response
|
||||
|
||||
openai.create_chat_completion(messages, model=model)
|
||||
|
||||
assert api_manager.get_total_prompt_tokens() == 0
|
||||
assert api_manager.get_total_completion_tokens() == 0
|
||||
assert api_manager.get_total_cost() == 0
|
||||
assert api_manager.get_total_prompt_tokens() == 0
|
||||
assert api_manager.get_total_completion_tokens() == 0
|
||||
assert api_manager.get_total_cost() == 0
|
||||
|
||||
@@ -7,10 +7,10 @@ from autogpt.commands.web_selenium import BrowsingError, browse_website
|
||||
@pytest.mark.vcr
|
||||
@pytest.mark.requires_openai_api_key
|
||||
def test_browse_website_nonexistent_url(agent: Agent, patched_api_requestor: None):
|
||||
url = "https://barrel-roll.com"
|
||||
url = "https://auto-gpt-thinks-this-website-does-not-exist.com"
|
||||
question = "How to execute a barrel roll"
|
||||
|
||||
with pytest.raises(BrowsingError, match=r"CONNECTION_CLOSED") as raised:
|
||||
with pytest.raises(BrowsingError, match=r"NAME_NOT_RESOLVED") as raised:
|
||||
browse_website(url, question, agent)
|
||||
|
||||
# Sanity check that the response is not too long
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import json
|
||||
import logging
|
||||
import tempfile
|
||||
from unittest import TestCase
|
||||
from pathlib import Path
|
||||
from xml.etree import ElementTree
|
||||
|
||||
import docx
|
||||
@@ -136,17 +136,18 @@ respective_file_creation_functions = {
|
||||
".md": mock_md_file,
|
||||
".tex": mock_latex_file,
|
||||
}
|
||||
binary_files_extensions = [".pdf", ".docx"]
|
||||
|
||||
|
||||
class TestConfig(TestCase):
|
||||
def test_parsers(self):
|
||||
binary_files_extensions = [".pdf", ".docx"]
|
||||
for (
|
||||
file_extension,
|
||||
c_file_creator,
|
||||
) in respective_file_creation_functions.items():
|
||||
created_filepath = c_file_creator()
|
||||
loaded_text = read_textual_file(created_filepath, logger)
|
||||
self.assertIn(plain_text_str, loaded_text)
|
||||
should_be_binary = file_extension in binary_files_extensions
|
||||
self.assertEqual(should_be_binary, is_file_binary_fn(created_filepath))
|
||||
def test_parsers():
|
||||
for (
|
||||
file_extension,
|
||||
c_file_creator,
|
||||
) in respective_file_creation_functions.items():
|
||||
created_file_path = Path(c_file_creator())
|
||||
loaded_text = read_textual_file(created_file_path, logger)
|
||||
|
||||
assert plain_text_str in loaded_text
|
||||
|
||||
should_be_binary = file_extension in binary_files_extensions
|
||||
assert should_be_binary == is_file_binary_fn(created_file_path)
|
||||
|
||||
Reference in New Issue
Block a user