Embedding Improvement
1. Move the embedding function into llm_utils. 2. Add a try/retry mechanism within the embedding function.
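For illustration, the two points in the commit message (relocating the helper into autogpt/llm_utils.py and wrapping the API call in a try block) might look roughly like the sketch below. The retry count, back-off, and exception choices are assumptions, not the project's exact implementation; only the function name create_embedding_with_ada comes from the diff.

```python
# Hypothetical sketch of the relocated helper in autogpt/llm_utils.py.
# Retry count, back-off, and caught exceptions are assumed for illustration.
import time

import openai


def create_embedding_with_ada(text: str, num_retries: int = 10) -> list:
    """Return the text-embedding-ada-002 embedding for `text`, retrying on API errors."""
    for attempt in range(num_retries):
        try:
            return openai.Embedding.create(
                input=[text], model="text-embedding-ada-002"
            )["data"][0]["embedding"]
        except (openai.error.RateLimitError, openai.error.APIError):
            # Back off briefly before retrying a transient API failure.
            time.sleep(2**attempt)
    raise RuntimeError("create_embedding_with_ada: all retries failed")
```

Callers such as PineconeMemory then use this helper instead of the old get_ada_embedding from autogpt.memory.base, as the diff below shows.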
@@ -2,8 +2,8 @@ import pinecone
 
 from colorama import Fore, Style
 
 from autogpt.logs import logger
-from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
+from autogpt.memory.base import MemoryProviderSingleton
+from autogpt.llm_utils import create_embedding_with_ada
 
 
 class PineconeMemory(MemoryProviderSingleton):
     def __init__(self, cfg):
@@ -43,7 +43,7 @@ class PineconeMemory(MemoryProviderSingleton):
         self.index = pinecone.Index(table_name)
 
     def add(self, data):
-        vector = get_ada_embedding(data)
+        vector = create_embedding_with_ada(data)
         # no metadata here. We may wish to change that long term.
         self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
         _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
@@ -63,7 +63,7 @@ class PineconeMemory(MemoryProviderSingleton):
         :param data: The data to compare to.
         :param num_relevant: The number of relevant data to return. Defaults to 5
         """
-        query_embedding = get_ada_embedding(data)
+        query_embedding = create_embedding_with_ada(data)
         results = self.index.query(
             query_embedding, top_k=num_relevant, include_metadata=True
         )