mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2026-02-19 05:04:22 +01:00
Added tests for the create_chat_message and generate_context functions of the chat module
This commit is contained in:
116
tests/test_chat.py
Normal file
116
tests/test_chat.py
Normal file
@@ -0,0 +1,116 @@
|
||||
|
||||
# Generated by CodiumAI
|
||||
import unittest
|
||||
import time
|
||||
from unittest.mock import patch
|
||||
|
||||
from autogpt.chat import create_chat_message, generate_context
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Code Analysis
|
||||
|
||||
Objective:
|
||||
The objective of the function is to create a chat message with the given role and content and return it as a dictionary.
|
||||
|
||||
Inputs:
|
||||
The function takes two inputs:
|
||||
- role (str): The role of the message sender, e.g., "system", "user", or "assistant".
|
||||
- content (str): The content of the message.
|
||||
|
||||
Flow:
|
||||
The function takes the role and content as input and creates a dictionary containing the role and content of the message. It then returns the dictionary as output.
|
||||
|
||||
Outputs:
|
||||
The function returns a dictionary containing the role and content of the message.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class TestChat(unittest.TestCase):
|
||||
|
||||
# Tests that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content.
|
||||
def test_happy_path_role_content(self):
|
||||
result = create_chat_message("system", "Hello, world!")
|
||||
self.assertEqual(result, {"role": "system", "content": "Hello, world!"})
|
||||
|
||||
# Tests that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content.
|
||||
def test_empty_role_content(self):
|
||||
result = create_chat_message("", "")
|
||||
self.assertEqual(result, {"role": "", "content": ""})
|
||||
|
||||
# Tests the behavior of the generate_context function when all input parameters are empty.
|
||||
@patch("time.strftime")
|
||||
def test_generate_context_empty_inputs(self, mock_strftime):
|
||||
# Mock the time.strftime function to return a fixed value
|
||||
mock_strftime.return_value = "Sat Apr 15 00:00:00 2023"
|
||||
# Arrange
|
||||
prompt = ""
|
||||
relevant_memory = ""
|
||||
full_message_history = []
|
||||
model = "gpt-3.5-turbo-0301"
|
||||
|
||||
# Act
|
||||
result = generate_context(prompt, relevant_memory, full_message_history, model)
|
||||
|
||||
# Assert
|
||||
expected_result = (-1, 47, 3, [
|
||||
{"role": "system", "content": ""},
|
||||
{"role": "system", "content": f"The current time and date is {time.strftime('%c')}"},
|
||||
{"role": "system", "content": f"This reminds you of these events from your past:\n\n\n"},
|
||||
])
|
||||
self.assertEqual(result, expected_result)
|
||||
|
||||
# Tests that the function successfully generates a current_context given valid inputs.
|
||||
def test_generate_context_valid_inputs(self):
|
||||
# Given
|
||||
prompt = "What is your favorite color?"
|
||||
relevant_memory = "You once painted your room blue."
|
||||
full_message_history = [
|
||||
create_chat_message("user", "Hi there!"),
|
||||
create_chat_message("assistant", "Hello! How can I assist you today?"),
|
||||
create_chat_message("user", "Can you tell me a joke?"),
|
||||
create_chat_message("assistant", "Why did the tomato turn red? Because it saw the salad dressing!"),
|
||||
create_chat_message("user", "Haha, that's funny."),
|
||||
]
|
||||
model = "gpt-3.5-turbo-0301"
|
||||
|
||||
# When
|
||||
result = generate_context(prompt, relevant_memory, full_message_history, model)
|
||||
|
||||
# Then
|
||||
self.assertIsInstance(result[0], int)
|
||||
self.assertIsInstance(result[1], int)
|
||||
self.assertIsInstance(result[2], int)
|
||||
self.assertIsInstance(result[3], list)
|
||||
self.assertGreaterEqual(result[0], 0)
|
||||
self.assertGreaterEqual(result[1], 0)
|
||||
self.assertGreaterEqual(result[2], 0)
|
||||
self.assertGreaterEqual(len(result[3]), 3) # current_context should have at least 3 messages
|
||||
self.assertLessEqual(result[1], 2048) # token limit for GPT-3.5-turbo-0301 is 2048 tokens
|
||||
|
||||
# Tests that the function works correctly with valid inputs.
|
||||
def test_generate_context_valid_inputs(self):
|
||||
# Arrange
|
||||
prompt = "Hello, how can I assist you today?"
|
||||
relevant_memory = "You previously mentioned needing help with a software issue."
|
||||
full_message_history = [
|
||||
create_chat_message("user", "Can you help me with a software issue?"),
|
||||
create_chat_message("assistant", "Of course, what seems to be the problem?"),
|
||||
create_chat_message("user", "I keep getting an error message."),
|
||||
create_chat_message("assistant", "Let's try some troubleshooting steps."),
|
||||
]
|
||||
model = "gpt-3.5-turbo-0301"
|
||||
|
||||
# Act
|
||||
next_message_index, tokens_used, insertion_index, context = generate_context(
|
||||
prompt, relevant_memory, full_message_history, model
|
||||
)
|
||||
|
||||
# Assert
|
||||
self.assertEqual(next_message_index, 3)
|
||||
self.assertGreater(tokens_used, 0)
|
||||
self.assertLessEqual(tokens_used, 2048)
|
||||
self.assertEqual(insertion_index, 3)
|
||||
self.assertEqual(len(context), 3)
|
||||
Reference in New Issue
Block a user