Auto-GPT/tests/integration/memory/utils.py
Reinier van der Leer bfbe613960 Vector memory revamp (part 1: refactoring) (#4208)
Additional changes:

* Improve typing

* Modularize message history memory & fix/refactor lots of things

* Fix summarization

* Move memory relevance calculation to MemoryItem & improve test

* Fix import warnings in web_selenium.py

* Remove `memory_add` ghost command

* Implement overlap in `split_text` (a sketch of the idea follows these notes)

* Move memory tests into subdirectory

* Remove deprecated `get_ada_embedding()` and helpers

* Fix used token calculation in `chat_with_ai`

* Replace the Message TypedDict with a dataclass

* Fix AgentManager singleton issues in tests

---------

Co-authored-by: Auto-GPT-Bot <github-bot@agpt.co>
2023-05-25 20:31:11 +02:00
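The "Implement overlap in `split_text`" item above refers to chunking long text into windows that share a trailing slice, so content straddling a chunk boundary is not lost. A minimal sketch of that idea follows; the function name, signature, and defaults are illustrative assumptions, not the repository's actual implementation.

# Hypothetical sketch of overlapping chunking; not Auto-GPT's actual split_text.
def split_text(text: str, max_length: int = 300, overlap: int = 50) -> list[str]:
    assert 0 <= overlap < max_length
    words = text.split()
    if not words:
        return []

    chunks = []
    step = max_length - overlap
    for start in range(0, len(words), step):
        # Each chunk repeats the last `overlap` words of the previous chunk.
        chunks.append(" ".join(words[start : start + max_length]))
        if start + max_length >= len(words):
            break
    return chunks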


import numpy
import pytest
from pytest_mock import MockerFixture

import autogpt.memory.vector.memory_item as vector_memory_item
import autogpt.memory.vector.providers.base as memory_provider_base
from autogpt.config.config import Config
from autogpt.llm.providers.openai import OPEN_AI_EMBEDDING_MODELS
from autogpt.memory.vector import get_memory
from autogpt.memory.vector.utils import Embedding

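# Dimensionality of the embedding vectors produced by the configured
# embedding model (looked up in OPEN_AI_EMBEDDING_MODELS).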
@pytest.fixture
def embedding_dimension(config: Config):
    return OPEN_AI_EMBEDDING_MODELS[config.embedding_model].embedding_dimensions

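# A deterministic fake embedding vector with the correct dimensionality.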
@pytest.fixture
def mock_embedding(embedding_dimension: int) -> Embedding:
    return numpy.full((1, embedding_dimension), 0.0255, numpy.float32)[0]

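# Patch get_embedding in both the MemoryItem module and the provider base
# module so the tests never call the real embedding API.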
@pytest.fixture
def mock_get_embedding(mocker: MockerFixture, embedding_dimension: int):
    mocker.patch.object(
        vector_memory_item,
        "get_embedding",
        return_value=[0.0255] * embedding_dimension,
    )
    mocker.patch.object(
        memory_provider_base,
        "get_embedding",
        return_value=[0.0255] * embedding_dimension,
    )

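# Temporarily switch the memory backend to "no_memory" for the duration of a
# test, then restore the previously configured backend.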
@pytest.fixture
def memory_none(agent_test_config: Config, mock_get_embedding):
    was_memory_backend = agent_test_config.memory_backend

    agent_test_config.set_memory_backend("no_memory")
    yield get_memory(agent_test_config)

    agent_test_config.set_memory_backend(was_memory_backend)
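
For orientation, a hypothetical test that consumes these fixtures might look like the sketch below. The test function is not part of the repository; it only exercises the fixtures defined above, which pytest injects by parameter name.

# Hypothetical consumer of the fixtures above; not part of utils.py.
import pytest


def test_mock_embedding_shape(mock_embedding, embedding_dimension: int):
    # The fake embedding is a 1-D float32 vector whose length matches the
    # configured embedding model's dimensionality.
    assert mock_embedding.shape == (embedding_dimension,)
    assert mock_embedding[0] == pytest.approx(0.0255)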