Vector memory revamp (part 1: refactoring) (#4208)

Additional changes:

* Improve typing

* Modularize message history memory & fix/refactor lots of things

* Fix summarization

* Move memory relevance calculation to MemoryItem & improve test

* Fix import warnings in web_selenium.py

* Remove `memory_add` ghost command

* Implement overlap in `split_text` (see the sketch after this list)

* Move memory tests into subdirectory

* Remove deprecated `get_ada_embedding()` and helpers

* Fix used token calculation in `chat_with_ai`

* Replace Message TypedDict by dataclass

* Fix AgentManager singleton issues in tests
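
The `split_text` overlap mentioned above can be pictured with the sketch below. This is an illustration only, not the actual implementation (the real `split_text` lives in `autogpt/processing/text.py` and chunks by tokens and sentence boundaries); the parameter values are invented.

```python
# Character-based sketch of chunking with overlap; the real split_text works
# on tokens/sentences, this only demonstrates the overlap idea.
def split_text(text: str, max_length: int = 300, overlap: int = 20) -> list[str]:
    if overlap >= max_length:
        raise ValueError("overlap must be smaller than max_length")
    chunks = []
    step = max_length - overlap
    for start in range(0, len(text), step):
        chunks.append(text[start : start + max_length])
        if start + max_length >= len(text):
            break  # this chunk already reaches the end of the text
    return chunks
```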

---------

Co-authored-by: Auto-GPT-Bot <github-bot@agpt.co>
Author: Reinier van der Leer
Date: 2023-05-25 20:31:11 +02:00 (committed by GitHub)
parent 10489e0df2
commit bfbe613960
92 changed files with 7282 additions and 7989 deletions

View File

@@ -90,30 +90,18 @@ OPENAI_API_KEY=your-openai-api-key
 ### EMBEDDINGS
 ## EMBEDDING_MODEL - Model to use for creating embeddings
-## EMBEDDING_TOKENIZER - Tokenizer to use for chunking large inputs
-## EMBEDDING_TOKEN_LIMIT - Chunk size limit for large inputs
 # EMBEDDING_MODEL=text-embedding-ada-002
-# EMBEDDING_TOKENIZER=cl100k_base
-# EMBEDDING_TOKEN_LIMIT=8191
 ################################################################################
 ### MEMORY
 ################################################################################
 ### MEMORY_BACKEND - Memory backend type
-## local - Default
-## pinecone - Pinecone (if configured)
+## json_file - Default
 ## redis - Redis (if configured)
-## milvus - Milvus (if configured - also works with Zilliz)
 ## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
-# MEMORY_BACKEND=local
-# MEMORY_INDEX=auto-gpt
+# MEMORY_BACKEND=json_file
+# MEMORY_INDEX=auto-gpt-memory
-### PINECONE
-## PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
-## PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
-# PINECONE_API_KEY=your-pinecone-api-key
-# PINECONE_ENV=your-pinecone-region
 ### REDIS
 ## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
@@ -125,38 +113,6 @@ OPENAI_API_KEY=your-openai-api-key
 # REDIS_PASSWORD=
 # WIPE_REDIS_ON_START=True
-### WEAVIATE
-## MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
-## WEAVIATE_HOST - Weaviate host IP
-## WEAVIATE_PORT - Weaviate host port
-## WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
-## USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
-## WEAVIATE_EMBEDDED_PATH - File system path were to persist data when running Embedded Weaviate
-## WEAVIATE_USERNAME - Weaviate username
-## WEAVIATE_PASSWORD - Weaviate password
-## WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
-# WEAVIATE_HOST="127.0.0.1"
-# WEAVIATE_PORT=8080
-# WEAVIATE_PROTOCOL="http"
-# USE_WEAVIATE_EMBEDDED=False
-# WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
-# WEAVIATE_USERNAME=
-# WEAVIATE_PASSWORD=
-# WEAVIATE_API_KEY=
-### MILVUS
-## MILVUS_ADDR - Milvus remote address (e.g. localhost:19530, https://xxx-xxxx.xxxx.xxxx.zillizcloud.com:443)
-## MILVUS_USERNAME - username for your Milvus database
-## MILVUS_PASSWORD - password for your Milvus database
-## MILVUS_SECURE - True to enable TLS. (Default: False)
-##                 Setting MILVUS_ADDR to a `https://` URL will override this setting.
-## MILVUS_COLLECTION - Milvus collection, change it if you want to start a new memory and retain the old memory.
-# MILVUS_ADDR=localhost:19530
-# MILVUS_USERNAME=
-# MILVUS_PASSWORD=
-# MILVUS_SECURE=
-# MILVUS_COLLECTION=autogpt
 ################################################################################
 ### IMAGE GENERATION PROVIDER
 ################################################################################

View File

@@ -43,3 +43,11 @@ Auto-GPT now has support for plugins! With plugins, you can extend Auto-GPT's ab
 adding support for third-party services and more.
 See https://github.com/Significant-Gravitas/Auto-GPT-Plugins for instructions and
 available plugins. Specific plugins can be allowlisted/denylisted in .env.
+
+## Memory backend deprecation ⚠️
+The Milvus, Pinecone and Weaviate memory backends were rendered incompatible
+by work on the memory system, and have been removed in `master`. The Redis
+memory store was also temporarily removed but we aim to merge a new implementation
+before the next release.
+Whether built-in support for the others will be added back in the future is subject to
+discussion, feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280

View File

@@ -5,11 +5,14 @@ from datetime import datetime
 from colorama import Fore, Style
 from autogpt.app import execute_command, get_command
+from autogpt.commands.command import CommandRegistry
 from autogpt.config import Config
+from autogpt.config.ai_config import AIConfig
 from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
 from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
-from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
-from autogpt.llm.token_counter import count_string_tokens
+from autogpt.llm.base import ChatSequence
+from autogpt.llm.chat import chat_with_ai, create_chat_completion
+from autogpt.llm.utils import count_string_tokens
 from autogpt.log_cycle.log_cycle import (
     FULL_MESSAGE_HISTORY_FILE_NAME,
     NEXT_ACTION_FILE_NAME,
@@ -19,6 +22,8 @@ from autogpt.log_cycle.log_cycle import (
     LogCycleHandler,
 )
 from autogpt.logs import logger, print_assistant_thoughts
+from autogpt.memory.message_history import MessageHistory
+from autogpt.memory.vector import VectorMemory
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner
 from autogpt.utils import clean_input
@@ -31,7 +36,6 @@ class Agent:
     Attributes:
         ai_name: The name of the agent.
         memory: The memory object to use.
-        full_message_history: The full message history.
         next_action_count: The number of actions to execute.
         system_prompt: The system prompt is the initial prompt that defines everything
             the AI needs to know to achieve its task successfully.
@@ -56,24 +60,19 @@ class Agent:
     def __init__(
         self,
-        ai_name,
-        memory,
-        full_message_history,
-        next_action_count,
-        command_registry,
-        config,
-        system_prompt,
-        triggering_prompt,
-        workspace_directory,
+        ai_name: str,
+        memory: VectorMemory,
+        next_action_count: int,
+        command_registry: CommandRegistry,
+        config: AIConfig,
+        system_prompt: str,
+        triggering_prompt: str,
+        workspace_directory: str,
     ):
         cfg = Config()
         self.ai_name = ai_name
         self.memory = memory
-        self.summary_memory = (
-            "I was created."  # Initial memory necessary to avoid hallucination
-        )
-        self.last_memory_index = 0
-        self.full_message_history = full_message_history
+        self.history = MessageHistory(self)
         self.next_action_count = next_action_count
         self.command_registry = command_registry
         self.config = config
@@ -114,7 +113,7 @@ class Agent:
                 self.config.ai_name,
                 self.created_at,
                 self.cycle_count,
-                self.full_message_history,
+                [m.raw() for m in self.history],
                 FULL_MESSAGE_HISTORY_FILE_NAME,
             )
             if (
@@ -132,8 +131,6 @@ class Agent:
                     self,
                     self.system_prompt,
                     self.triggering_prompt,
-                    self.full_message_history,
-                    self.memory,
                     cfg.fast_token_limit,
                 )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument
@@ -260,9 +257,7 @@ class Agent:
             # Execute command
             if command_name is not None and command_name.lower().startswith("error"):
-                result = (
-                    f"Command {command_name} threw the following error: {arguments}"
-                )
+                result = f"Could not execute command: {arguments}"
             elif command_name == "human_feedback":
                 result = f"Human feedback: {user_input}"
             elif command_name == "self_feedback":
@@ -286,7 +281,7 @@ class Agent:
                     str(command_result), cfg.fast_llm_model
                 )
                 memory_tlength = count_string_tokens(
-                    str(self.summary_memory), cfg.fast_llm_model
+                    str(self.history.summary_message()), cfg.fast_llm_model
                 )
                 if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
                     result = f"Failure: command {command_name} returned too much output. \
@@ -302,12 +297,10 @@ class Agent:
             # Check if there's a result from the command append it to the message
             # history
             if result is not None:
-                self.full_message_history.append(create_chat_message("system", result))
+                self.history.add("system", result, "action_result")
                 logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
             else:
-                self.full_message_history.append(
-                    create_chat_message("system", "Unable to execute command")
-                )
+                self.history.add("system", "Unable to execute command", "action_result")
                 logger.typewriter_log(
                     "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                 )
@@ -343,17 +336,18 @@ class Agent:
         thought = thoughts.get("thoughts", "")
         feedback_thoughts = thought + reasoning + plan
-        messages = [{"role": "user", "content": feedback_prompt + feedback_thoughts}]
+        prompt = ChatSequence.for_model(llm_model)
+        prompt.add("user", feedback_prompt + feedback_thoughts)
         self.log_cycle_handler.log_cycle(
             self.config.ai_name,
             self.created_at,
             self.cycle_count,
-            messages,
+            prompt.raw(),
             PROMPT_SUPERVISOR_FEEDBACK_FILE_NAME,
         )
-        feedback = create_chat_completion(messages, model=llm_model)
+        feedback = create_chat_completion(prompt)
         self.log_cycle_handler.log_cycle(
             self.config.ai_name,

View File

@@ -1,10 +1,9 @@
"""Agent manager for managing GPT agents""" """Agent manager for managing GPT agents"""
from __future__ import annotations from __future__ import annotations
from typing import List from autogpt.config import Config
from autogpt.llm.base import ChatSequence
from autogpt.config.config import Config from autogpt.llm.chat import Message, create_chat_completion
from autogpt.llm import Message, create_chat_completion
from autogpt.singleton import Singleton from autogpt.singleton import Singleton
@@ -13,55 +12,55 @@ class AgentManager(metaclass=Singleton):
     def __init__(self):
         self.next_key = 0
-        self.agents = {}  # key, (task, full_message_history, model)
+        self.agents: dict[
+            int, tuple[str, list[Message], str]
+        ] = {}  # key, (task, full_message_history, model)
         self.cfg = Config()
     # Create new GPT agent
     # TODO: Centralise use of create_chat_completion() to globally enforce token limit
-    def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
+    def create_agent(
+        self, task: str, creation_prompt: str, model: str
+    ) -> tuple[int, str]:
         """Create a new agent and return its key
         Args:
             task: The task to perform
-            prompt: The prompt to use
-            model: The model to use
+            creation_prompt: Prompt passed to the LLM at creation
+            model: The model to use to run this agent
         Returns:
             The key of the new agent
         """
-        messages: List[Message] = [
-            {"role": "user", "content": prompt},
-        ]
+        messages = ChatSequence.for_model(model, [Message("user", creation_prompt)])
         for plugin in self.cfg.plugins:
             if not plugin.can_handle_pre_instruction():
                 continue
-            if plugin_messages := plugin.pre_instruction(messages):
-                messages.extend(iter(plugin_messages))
+            if plugin_messages := plugin.pre_instruction(messages.raw()):
+                messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
         # Start GPT instance
-        agent_reply = create_chat_completion(
-            model=model,
-            messages=messages,
-        )
-        messages.append({"role": "assistant", "content": agent_reply})
+        agent_reply = create_chat_completion(prompt=messages)
+        messages.add("assistant", agent_reply)
         plugins_reply = ""
         for i, plugin in enumerate(self.cfg.plugins):
             if not plugin.can_handle_on_instruction():
                 continue
-            if plugin_result := plugin.on_instruction(messages):
+            if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
                 sep = "\n" if i else ""
                 plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
         if plugins_reply and plugins_reply != "":
-            messages.append({"role": "assistant", "content": plugins_reply})
+            messages.add("assistant", plugins_reply)
         key = self.next_key
         # This is done instead of len(agents) to make keys unique even if agents
         # are deleted
         self.next_key += 1
-        self.agents[key] = (task, messages, model)
+        self.agents[key] = (task, list(messages), model)
         for plugin in self.cfg.plugins:
             if not plugin.can_handle_post_instruction():
@@ -83,33 +82,30 @@ class AgentManager(metaclass=Singleton):
         task, messages, model = self.agents[int(key)]
         # Add user message to message history before sending to agent
-        messages.append({"role": "user", "content": message})
+        messages = ChatSequence.for_model(model, messages)
+        messages.add("user", message)
         for plugin in self.cfg.plugins:
             if not plugin.can_handle_pre_instruction():
                 continue
-            if plugin_messages := plugin.pre_instruction(messages):
-                for plugin_message in plugin_messages:
-                    messages.append(plugin_message)
+            if plugin_messages := plugin.pre_instruction([m.raw() for m in messages]):
+                messages.extend([Message(**raw_msg) for raw_msg in plugin_messages])
         # Start GPT instance
-        agent_reply = create_chat_completion(
-            model=model,
-            messages=messages,
-        )
-        messages.append({"role": "assistant", "content": agent_reply})
+        agent_reply = create_chat_completion(prompt=messages)
+        messages.add("assistant", agent_reply)
         plugins_reply = agent_reply
         for i, plugin in enumerate(self.cfg.plugins):
             if not plugin.can_handle_on_instruction():
                 continue
-            if plugin_result := plugin.on_instruction(messages):
+            if plugin_result := plugin.on_instruction([m.raw() for m in messages]):
                 sep = "\n" if i else ""
                 plugins_reply = f"{plugins_reply}{sep}{plugin_result}"
         # Update full message history
         if plugins_reply and plugins_reply != "":
-            messages.append({"role": "assistant", "content": plugins_reply})
+            messages.add("assistant", plugins_reply)
         for plugin in self.cfg.plugins:
             if not plugin.can_handle_post_instruction():

View File

@@ -7,14 +7,13 @@ from autogpt.commands.command import CommandRegistry, command
 from autogpt.commands.web_requests import scrape_links, scrape_text
 from autogpt.config import Config
 from autogpt.logs import logger
-from autogpt.memory import get_memory
+from autogpt.memory.vector import get_memory
 from autogpt.processing.text import summarize_text
 from autogpt.prompts.generator import PromptGenerator
 from autogpt.speech import say_text
 from autogpt.url_utils.validators import validate_url
 CFG = Config()
-AGENT_MANAGER = AgentManager()
 def is_valid_int(value: str) -> bool:
@@ -114,24 +113,20 @@ def execute_command(
         # TODO: Remove commands below after they are moved to the command registry.
         command_name = map_command_synonyms(command_name.lower())
-        if command_name == "memory_add":
-            return get_memory(CFG).add(arguments["string"])
         # TODO: Change these to take in a file rather than pasted code, if
         # non-file is given, return instructions "Input should be a python
         # filepath, write your code to file and try again
-        else:
-            for command in prompt.commands:
-                if (
-                    command_name == command["label"].lower()
-                    or command_name == command["name"].lower()
-                ):
-                    return command["function"](**arguments)
-            return (
-                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
-                " list for available commands and only respond in the specified JSON"
-                " format."
-            )
+        for command in prompt.commands:
+            if (
+                command_name == command["label"].lower()
+                or command_name == command["name"].lower()
+            ):
+                return command["function"](**arguments)
+        return (
+            f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
+            " list for available commands and only respond in the specified JSON"
+            " format."
+        )
     except Exception as e:
         return f"Error: {str(e)}"
@@ -141,7 +136,7 @@ def execute_command(
 )
 @validate_url
 def get_text_summary(url: str, question: str) -> str:
-    """Return the results of a Google search
+    """Get the text summary of a webpage
     Args:
         url (str): The url to scrape
@@ -151,14 +146,15 @@ def get_text_summary(url: str, question: str) -> str:
         str: The summary of the text
     """
     text = scrape_text(url)
-    summary = summarize_text(url, text, question)
+    summary, _ = summarize_text(text, question=question)
     return f""" "Result" : {summary}"""
 @command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
 @validate_url
 def get_hyperlinks(url: str) -> Union[str, List[str]]:
-    """Return the results of a Google search
+    """Get all hyperlinks on a webpage
     Args:
         url (str): The url to scrape
@@ -186,6 +182,8 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
     Returns:
         str: The response of the agent
     """
+    agent_manager = AgentManager()
     # Remove underscores from name
     voice_name = name.replace("_", " ")
@@ -195,13 +193,13 @@ def start_agent(name: str, task: str, prompt: str, model=CFG.fast_llm_model) ->
     # Create agent
     if CFG.speak_mode:
         say_text(agent_intro, 1)
-    key, ack = AGENT_MANAGER.create_agent(task, first_message, model)
+    key, ack = agent_manager.create_agent(task, first_message, model)
     if CFG.speak_mode:
         say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
     # Assign task (prompt), get response
-    agent_response = AGENT_MANAGER.message_agent(key, prompt)
+    agent_response = agent_manager.message_agent(key, prompt)
     return f"Agent {name} created with key {key}. First response: {agent_response}"
@@ -211,7 +209,7 @@ def message_agent(key: str, message: str) -> str:
"""Message an agent with a given key and message""" """Message an agent with a given key and message"""
# Check if the key is a valid integer # Check if the key is a valid integer
if is_valid_int(key): if is_valid_int(key):
agent_response = AGENT_MANAGER.message_agent(int(key), message) agent_response = AgentManager().message_agent(int(key), message)
else: else:
return "Invalid key, must be an integer." return "Invalid key, must be an integer."
@@ -229,7 +227,7 @@ def list_agents() -> str:
         str: A list of all agents
     """
     return "List of agents:\n" + "\n".join(
-        [str(x[0]) + ": " + x[1] for x in AGENT_MANAGER.list_agents()]
+        [str(x[0]) + ": " + x[1] for x in AgentManager().list_agents()]
     )
@@ -243,5 +241,5 @@ def delete_agent(key: str) -> str:
     Returns:
         str: A message indicating whether the agent was deleted or not
     """
-    result = AGENT_MANAGER.delete_agent(key)
+    result = AgentManager().delete_agent(key)
     return f"Agent {key} deleted." if result else f"Agent {key} does not exist."

View File

@@ -2,7 +2,7 @@
 from __future__ import annotations
 from autogpt.commands.command import command
-from autogpt.llm import call_ai_function
+from autogpt.llm.utils import call_ai_function
 @command(

View File

@@ -4,7 +4,7 @@ from __future__ import annotations
 import hashlib
 import os
 import os.path
-from typing import Dict, Generator, Literal, Tuple
+from typing import Generator, Literal
 import charset_normalizer
 import requests
@@ -15,6 +15,7 @@ from autogpt.commands.command import command
 from autogpt.commands.file_operations_utils import read_textual_file
 from autogpt.config import Config
 from autogpt.logs import logger
+from autogpt.memory.vector import MemoryItem, VectorMemory
 from autogpt.spinner import Spinner
 from autogpt.utils import readable_file_size
@@ -28,7 +29,9 @@ def text_checksum(text: str) -> str:
     return hashlib.md5(text.encode("utf-8")).hexdigest()
-def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str | None]]:
+def operations_from_log(
+    log_path: str,
+) -> Generator[tuple[Operation, str, str | None], None, None]:
     """Parse the file operations log and return a tuple containing the log entries"""
     try:
         log = open(log_path, "r", encoding="utf-8")
@@ -45,6 +48,7 @@ def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str |
             try:
                 path, checksum = (x.strip() for x in tail.rsplit(" #", maxsplit=1))
             except ValueError:
+                logger.warn(f"File log entry lacks checksum: '{line}'")
                 path, checksum = tail.strip(), None
             yield (operation, path, checksum)
         elif operation == "delete":
@@ -53,7 +57,7 @@ def operations_from_log(log_path: str) -> Generator[Tuple[Operation, str, str |
     log.close()
-def file_operations_state(log_path: str) -> Dict:
+def file_operations_state(log_path: str) -> dict[str, str]:
     """Iterates over the operations log and returns the expected state.
     Parses a log file at CFG.file_logger_path to construct a dictionary that maps
@@ -156,43 +160,41 @@ def read_file(filename: str) -> str:
""" """
try: try:
content = read_textual_file(filename, logger) content = read_textual_file(filename, logger)
# TODO: invalidate/update memory when file is edited
file_memory = MemoryItem.from_text_file(content, filename)
if len(file_memory.chunks) > 1:
return file_memory.summary
return content return content
except Exception as e: except Exception as e:
return f"Error: {str(e)}" return f"Error: {str(e)}"
def ingest_file( def ingest_file(
filename: str, memory, max_length: int = 4000, overlap: int = 200 filename: str,
memory: VectorMemory,
) -> None: ) -> None:
""" """
Ingest a file by reading its content, splitting it into chunks with a specified Ingest a file by reading its content, splitting it into chunks with a specified
maximum length and overlap, and adding the chunks to the memory storage. maximum length and overlap, and adding the chunks to the memory storage.
:param filename: The name of the file to ingest Args:
:param memory: An object with an add() method to store the chunks in memory filename: The name of the file to ingest
:param max_length: The maximum length of each chunk, default is 4000 memory: An object with an add() method to store the chunks in memory
:param overlap: The number of overlapping characters between chunks, default is 200
""" """
try: try:
logger.info(f"Working with file {filename}") logger.info(f"Ingesting file {filename}")
content = read_file(filename) content = read_file(filename)
content_length = len(content)
logger.info(f"File length: {content_length} characters")
chunks = list(split_file(content, max_length=max_length, overlap=overlap)) # TODO: differentiate between different types of files
file_memory = MemoryItem.from_text_file(content, filename)
logger.debug(f"Created memory: {file_memory.dump()}")
memory.add(file_memory)
num_chunks = len(chunks) logger.info(f"Ingested {len(file_memory.e_chunks)} chunks from {filename}")
for i, chunk in enumerate(chunks):
logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
memory_to_add = (
f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
)
memory.add(memory_to_add)
logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
except Exception as err: except Exception as err:
logger.info(f"Error while ingesting file '{filename}': {err}") logger.warn(f"Error while ingesting file '{filename}': {err}")
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"') @command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')

View File

@@ -10,6 +10,7 @@ from bs4 import BeautifulSoup
 from pylatexenc.latex2text import LatexNodes2Text
 from autogpt import logs
+from autogpt.logs import logger
 class ParserStrategy:
@@ -21,6 +22,7 @@ class ParserStrategy:
 class TXTParser(ParserStrategy):
     def read(self, file_path):
         charset_match = charset_normalizer.from_path(file_path).best()
+        logger.debug(f"Reading '{file_path}' with encoding '{charset_match.encoding}'")
         return str(charset_match)
@@ -150,9 +152,7 @@ def read_textual_file(file_path: str, logger: logs.Logger):
     parser = extension_to_parser.get(file_extension)
     if not parser:
         if is_binary:
-            raise ValueError(
-                "Unsupported binary file format: {}".format(file_extension)
-            )
+            raise ValueError(f"Unsupported binary file format: {file_extension}")
         # fallback to txt file parser (to support script and code files loading)
         parser = TXTParser()
     file_context = FileContext(parser, logger)

View File

@@ -3,7 +3,7 @@ from __future__ import annotations
 import json
 from autogpt.commands.command import command
-from autogpt.llm import call_ai_function
+from autogpt.llm.utils import call_ai_function
 @command(

View File

@@ -100,13 +100,3 @@ def scrape_links(url: str) -> str | list[str]:
     hyperlinks = extract_hyperlinks(soup, url)
     return format_hyperlinks(hyperlinks)
-def create_message(chunk, question):
-    """Create a message for the user to summarize a chunk of text"""
-    return {
-        "role": "user",
-        "content": f'"""{chunk}""" Using the above text, answer the following'
-        f' question: "{question}" -- if the question cannot be answered using the'
-        " text, summarize the text.",
-    }

View File

@@ -4,28 +4,39 @@ from __future__ import annotations
 import logging
 from pathlib import Path
 from sys import platform
+from typing import Optional, Type
 from bs4 import BeautifulSoup
-from selenium import webdriver
 from selenium.common.exceptions import WebDriverException
 from selenium.webdriver.chrome.options import Options as ChromeOptions
+from selenium.webdriver.chrome.service import Service as ChromeDriverService
+from selenium.webdriver.chrome.webdriver import WebDriver as ChromeDriver
 from selenium.webdriver.common.by import By
 from selenium.webdriver.edge.options import Options as EdgeOptions
+from selenium.webdriver.edge.service import Service as EdgeDriverService
+from selenium.webdriver.edge.webdriver import WebDriver as EdgeDriver
 from selenium.webdriver.firefox.options import Options as FirefoxOptions
+from selenium.webdriver.firefox.service import Service as GeckoDriverService
+from selenium.webdriver.firefox.webdriver import WebDriver as FirefoxDriver
 from selenium.webdriver.remote.webdriver import WebDriver
 from selenium.webdriver.safari.options import Options as SafariOptions
+from selenium.webdriver.safari.webdriver import WebDriver as SafariDriver
 from selenium.webdriver.support import expected_conditions as EC
 from selenium.webdriver.support.wait import WebDriverWait
 from webdriver_manager.chrome import ChromeDriverManager
 from webdriver_manager.firefox import GeckoDriverManager
-from webdriver_manager.microsoft import EdgeChromiumDriverManager
+from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
-import autogpt.processing.text as summary
 from autogpt.commands.command import command
 from autogpt.config import Config
+from autogpt.logs import logger
+from autogpt.memory.vector import MemoryItem, NoMemory, get_memory
 from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
+from autogpt.processing.text import summarize_text
 from autogpt.url_utils.validators import validate_url
+BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions
 FILE_DIR = Path(__file__).parent.parent
 CFG = Config()
@@ -55,14 +66,14 @@ def browse_website(url: str, question: str) -> str:
return f"Error: {msg}" return f"Error: {msg}"
add_header(driver) add_header(driver)
summary_text = summary.summarize_text(url, text, question, driver) summary = summarize_memorize_webpage(url, text, question, driver)
links = scrape_links_with_selenium(driver, url) links = scrape_links_with_selenium(driver, url)
# Limit links to 5 # Limit links to 5
if len(links) > 5: if len(links) > 5:
links = links[:5] links = links[:5]
close_browser(driver) close_browser(driver)
return f"Answer gathered from website: {summary_text} \n \n Links: {links}" return f"Answer gathered from website: {summary}\n\nLinks: {links}"
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]: def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
@@ -76,14 +87,14 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
""" """
logging.getLogger("selenium").setLevel(logging.CRITICAL) logging.getLogger("selenium").setLevel(logging.CRITICAL)
options_available = { options_available: dict[str, Type[BrowserOptions]] = {
"chrome": ChromeOptions, "chrome": ChromeOptions,
"safari": SafariOptions,
"firefox": FirefoxOptions,
"edge": EdgeOptions, "edge": EdgeOptions,
"firefox": FirefoxOptions,
"safari": SafariOptions,
} }
options = options_available[CFG.selenium_web_browser]() options: BrowserOptions = options_available[CFG.selenium_web_browser]()
options.add_argument( options.add_argument(
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36" "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
) )
@@ -92,17 +103,17 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
         if CFG.selenium_headless:
             options.headless = True
             options.add_argument("--disable-gpu")
-        driver = webdriver.Firefox(
-            executable_path=GeckoDriverManager().install(), options=options
+        driver = FirefoxDriver(
+            service=GeckoDriverService(GeckoDriverManager().install()), options=options
         )
+    elif CFG.selenium_web_browser == "edge":
+        driver = EdgeDriver(
+            service=EdgeDriverService(EdgeDriverManager().install()), options=options
+        )
     elif CFG.selenium_web_browser == "safari":
         # Requires a bit more setup on the users end
         # See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
-        driver = webdriver.Safari(options=options)
+        driver = SafariDriver(options=options)
-    elif CFG.selenium_web_browser == "edge":
-        driver = webdriver.Edge(
-            executable_path=EdgeChromiumDriverManager().install(), options=options
-        )
     else:
         if platform == "linux" or platform == "linux2":
             options.add_argument("--disable-dev-shm-usage")
@@ -115,10 +126,10 @@ def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
             chromium_driver_path = Path("/usr/bin/chromedriver")
-        driver = webdriver.Chrome(
-            executable_path=chromium_driver_path
+        driver = ChromeDriver(
+            service=ChromeDriverService(str(chromium_driver_path))
             if chromium_driver_path.exists()
-            else ChromeDriverManager().install(),
+            else ChromeDriverService(ChromeDriverManager().install()),
             options=options,
         )
     driver.get(url)
@@ -188,3 +199,30 @@ def add_header(driver: WebDriver) -> None:
         driver.execute_script(overlay_script)
     except Exception as e:
         print(f"Error executing overlay.js: {e}")
+def summarize_memorize_webpage(
+    url: str, text: str, question: str, driver: Optional[WebDriver] = None
+) -> str:
+    """Summarize text using the OpenAI API
+    Args:
+        url (str): The url of the text
+        text (str): The text to summarize
+        question (str): The question to ask the model
+        driver (WebDriver): The webdriver to use to scroll the page
+    Returns:
+        str: The summary of the text
+    """
+    if not text:
+        return "Error: No text to summarize"
+    text_length = len(text)
+    logger.info(f"Text length: {text_length} characters")
+    memory = get_memory(CFG)
+    new_memory = MemoryItem.from_webpage(text, url, question=question)
+    memory.add(new_memory)
+    return new_memory.summary

View File

@@ -4,7 +4,7 @@ from __future__ import annotations
 import json
 from autogpt.commands.command import command
-from autogpt.llm import call_ai_function
+from autogpt.llm.utils import call_ai_function
 @command(

View File

@@ -7,11 +7,12 @@ from __future__ import annotations
 import os
 import platform
 from pathlib import Path
-from typing import Any, Optional, Type
+from typing import Optional
 import distro
 import yaml
+from autogpt.commands.command import CommandRegistry
 from autogpt.prompts.generator import PromptGenerator
 # Soon this will go in a folder where it remembers more stuff about the run(s)
@@ -53,8 +54,8 @@ class AIConfig:
         self.ai_role = ai_role
         self.ai_goals = ai_goals
         self.api_budget = api_budget
-        self.prompt_generator = None
-        self.command_registry = None
+        self.prompt_generator: PromptGenerator | None = None
+        self.command_registry: CommandRegistry | None = None
     @staticmethod
     def load(config_file: str = SAVE_FILE) -> "AIConfig":

View File

@@ -17,8 +17,8 @@ class Config(metaclass=Singleton):
     def __init__(self) -> None:
         """Initialize the Config class"""
-        self.workspace_path = None
-        self.file_logger_path = None
+        self.workspace_path: str = None
+        self.file_logger_path: str = None
         self.debug_mode = False
         self.continuous_mode = False
@@ -58,9 +58,6 @@ class Config(metaclass=Singleton):
         self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
         self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002")
-        self.embedding_tokenizer = os.getenv("EMBEDDING_TOKENIZER", "cl100k_base")
-        self.embedding_token_limit = int(os.getenv("EMBEDDING_TOKEN_LIMIT", 8191))
-        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 3000))
         self.browse_spacy_language_model = os.getenv(
             "BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
         )
@@ -99,28 +96,6 @@ class Config(metaclass=Singleton):
         self.google_api_key = os.getenv("GOOGLE_API_KEY")
         self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
-        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
-        self.pinecone_region = os.getenv("PINECONE_ENV")
-        self.weaviate_host = os.getenv("WEAVIATE_HOST")
-        self.weaviate_port = os.getenv("WEAVIATE_PORT")
-        self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
-        self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
-        self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
-        self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
-        self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
-        self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
-        self.use_weaviate_embedded = (
-            os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"
-        )
-        # milvus or zilliz cloud configuration.
-        self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
-        self.milvus_username = os.getenv("MILVUS_USERNAME")
-        self.milvus_password = os.getenv("MILVUS_PASSWORD")
-        self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
-        self.milvus_secure = os.getenv("MILVUS_SECURE") == "True"
         self.image_provider = os.getenv("IMAGE_PROVIDER")
         self.image_size = int(os.getenv("IMAGE_SIZE", 256))
         self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")
@@ -146,14 +121,13 @@ class Config(metaclass=Singleton):
" (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36", " (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
) )
self.memory_backend = os.getenv("MEMORY_BACKEND", "json_file")
self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt-memory")
self.redis_host = os.getenv("REDIS_HOST", "localhost") self.redis_host = os.getenv("REDIS_HOST", "localhost")
self.redis_port = os.getenv("REDIS_PORT", "6379") self.redis_port = int(os.getenv("REDIS_PORT", "6379"))
self.redis_password = os.getenv("REDIS_PASSWORD", "") self.redis_password = os.getenv("REDIS_PASSWORD", "")
self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True" self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
# Note that indexes must be created on db 0 in redis, this is not configurable.
self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins") self.plugins_dir = os.getenv("PLUGINS_DIR", "plugins")
self.plugins: List[AutoGPTPluginTemplate] = [] self.plugins: List[AutoGPTPluginTemplate] = []
@@ -250,18 +224,6 @@ class Config(metaclass=Singleton):
"""Set the model to use for creating embeddings.""" """Set the model to use for creating embeddings."""
self.embedding_model = value self.embedding_model = value
def set_embedding_tokenizer(self, value: str) -> None:
"""Set the tokenizer to use when creating embeddings."""
self.embedding_tokenizer = value
def set_embedding_token_limit(self, value: int) -> None:
"""Set the token limit for creating embeddings."""
self.embedding_token_limit = value
def set_browse_chunk_max_length(self, value: int) -> None:
"""Set the browse_website command chunk max length value."""
self.browse_chunk_max_length = value
def set_openai_api_key(self, value: str) -> None: def set_openai_api_key(self, value: str) -> None:
"""Set the OpenAI API key value.""" """Set the OpenAI API key value."""
self.openai_api_key = value self.openai_api_key = value
@@ -286,14 +248,6 @@ class Config(metaclass=Singleton):
"""Set the custom search engine id value.""" """Set the custom search engine id value."""
self.custom_search_engine_id = value self.custom_search_engine_id = value
def set_pinecone_api_key(self, value: str) -> None:
"""Set the Pinecone API key value."""
self.pinecone_api_key = value
def set_pinecone_region(self, value: str) -> None:
"""Set the Pinecone region value."""
self.pinecone_region = value
def set_debug_mode(self, value: bool) -> None: def set_debug_mode(self, value: bool) -> None:
"""Set the debug mode value.""" """Set the debug mode value."""
self.debug_mode = value self.debug_mode = value

View File

@@ -6,9 +6,9 @@ from colorama import Back, Fore, Style
 from autogpt import utils
 from autogpt.config import Config
-from autogpt.llm.llm_utils import check_model
+from autogpt.llm.utils import check_model
 from autogpt.logs import logger
-from autogpt.memory import get_supported_memory_backends
+from autogpt.memory.vector import get_supported_memory_backends
 CFG = Config()

View File

@@ -11,7 +11,7 @@ from regex import regex
 from autogpt.config import Config
 from autogpt.json_utils.json_fix_general import correct_json
-from autogpt.llm import call_ai_function
+from autogpt.llm.utils import call_ai_function
 from autogpt.logs import logger
 from autogpt.speech import say_text

View File

@@ -1,4 +1,3 @@
-from autogpt.llm.api_manager import ApiManager
 from autogpt.llm.base import (
     ChatModelInfo,
     ChatModelResponse,
@@ -8,18 +7,8 @@ from autogpt.llm.base import (
     Message,
     ModelInfo,
 )
-from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context
-from autogpt.llm.llm_utils import (
-    call_ai_function,
-    chunked_tokens,
-    create_chat_completion,
-    get_ada_embedding,
-)
-from autogpt.llm.modelsinfo import COSTS
-from autogpt.llm.token_counter import count_message_tokens, count_string_tokens
 __all__ = [
-    "ApiManager",
     "Message",
     "ModelInfo",
     "ChatModelInfo",
@@ -27,14 +16,4 @@ __all__ = [
"LLMResponse", "LLMResponse",
"ChatModelResponse", "ChatModelResponse",
"EmbeddingModelResponse", "EmbeddingModelResponse",
"create_chat_message",
"generate_context",
"chat_with_ai",
"call_ai_function",
"create_chat_completion",
"get_ada_embedding",
"chunked_tokens",
"COSTS",
"count_message_tokens",
"count_string_tokens",
] ]

View File

@@ -6,6 +6,7 @@ import openai
 from openai import Model
 from autogpt.config import Config
+from autogpt.llm.base import MessageDict
 from autogpt.llm.modelsinfo import COSTS
 from autogpt.logs import logger
 from autogpt.singleton import Singleton
@@ -28,7 +29,7 @@ class ApiManager(metaclass=Singleton):
     def create_chat_completion(
         self,
-        messages: list,  # type: ignore
+        messages: list[MessageDict],
         model: str | None = None,
         temperature: float = None,
         max_tokens: int | None = None,
@@ -71,7 +72,7 @@ class ApiManager(metaclass=Singleton):
         self.update_cost(prompt_tokens, completion_tokens, model)
         return response
-    def update_cost(self, prompt_tokens, completion_tokens, model):
+    def update_cost(self, prompt_tokens, completion_tokens, model: str):
         """
         Update the total cost, prompt tokens, and completion tokens.
@@ -80,6 +81,9 @@ class ApiManager(metaclass=Singleton):
             completion_tokens (int): The number of tokens used in the completion.
             model (str): The model used for the API call.
         """
+        # the .model property in API responses can contain version suffixes like -v2
+        model = model[:-3] if model.endswith("-v2") else model
         self.total_prompt_tokens += prompt_tokens
         self.total_completion_tokens += completion_tokens
         self.total_cost += (

View File

@@ -1,12 +1,28 @@
+from __future__ import annotations
 from dataclasses import dataclass, field
-from typing import List, TypedDict
+from math import ceil, floor
+from typing import List, Literal, TypedDict
+MessageRole = Literal["system", "user", "assistant"]
+MessageType = Literal["ai_response", "action_result"]
-class Message(TypedDict):
+class MessageDict(TypedDict):
+    role: MessageRole
+    content: str
+@dataclass
+class Message:
     """OpenAI Message object containing a role and the message content"""
-    role: str
+    role: MessageRole
     content: str
+    type: MessageType | None = None
+    def raw(self) -> MessageDict:
+        return {"role": self.role, "content": self.content}
 @dataclass
@@ -31,6 +47,13 @@ class ChatModelInfo(ModelInfo):
     pass
+@dataclass
+class TextModelInfo(ModelInfo):
+    """Struct for text completion model information."""
+    pass
 @dataclass
 class EmbeddingModelInfo(ModelInfo):
     """Struct for embedding model information."""
@@ -38,6 +61,73 @@ class EmbeddingModelInfo(ModelInfo):
     embedding_dimensions: int
+@dataclass
+class ChatSequence:
+    """Utility container for a chat sequence"""
+    model: ChatModelInfo
+    messages: list[Message] = field(default_factory=list)
+    def __getitem__(self, i: int):
+        return self.messages[i]
+    def __iter__(self):
+        return iter(self.messages)
+    def __len__(self):
+        return len(self.messages)
+    def append(self, message: Message):
+        return self.messages.append(message)
+    def extend(self, messages: list[Message] | ChatSequence):
+        return self.messages.extend(messages)
+    def insert(self, index: int, *messages: Message):
+        for message in reversed(messages):
+            self.messages.insert(index, message)
+    @classmethod
+    def for_model(cls, model_name: str, messages: list[Message] | ChatSequence = []):
+        from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
+        if not model_name in OPEN_AI_CHAT_MODELS:
+            raise ValueError(f"Unknown chat model '{model_name}'")
+        return ChatSequence(
+            model=OPEN_AI_CHAT_MODELS[model_name], messages=list(messages)
+        )
+    def add(self, message_role: MessageRole, content: str):
+        self.messages.append(Message(message_role, content))
+    @property
+    def token_length(self):
+        from autogpt.llm.utils import count_message_tokens
+        return count_message_tokens(self.messages, self.model.name)
+    def raw(self) -> list[MessageDict]:
+        return [m.raw() for m in self.messages]
+    def dump(self) -> str:
+        SEPARATOR_LENGTH = 42
+        def separator(text: str):
+            half_sep_len = (SEPARATOR_LENGTH - 2 - len(text)) / 2
+            return f"{floor(half_sep_len)*'-'} {text.upper()} {ceil(half_sep_len)*'-'}"
+        formatted_messages = "\n".join(
+            [f"{separator(m.role)}\n{m.content}" for m in self.messages]
+        )
+        return f"""
+============== ChatSequence ==============
+Length: {self.token_length} tokens; {len(self.messages)} messages
+{formatted_messages}
+==========================================
+"""
 @dataclass
 class LLMResponse:
     """Standard response struct for a response from an LLM model."""

View File

@@ -1,260 +1,200 @@
from __future__ import annotations
import time import time
from random import shuffle from random import shuffle
from typing import TYPE_CHECKING
from openai.error import RateLimitError if TYPE_CHECKING:
from autogpt.agent.agent import Agent
from autogpt.config import Config from autogpt.config import Config
from autogpt.llm.api_manager import ApiManager from autogpt.llm.api_manager import ApiManager
from autogpt.llm.base import Message from autogpt.llm.base import ChatSequence, Message
from autogpt.llm.llm_utils import create_chat_completion from autogpt.llm.utils import count_message_tokens, create_chat_completion
from autogpt.llm.token_counter import count_message_tokens
from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME from autogpt.log_cycle.log_cycle import CURRENT_CONTEXT_FILE_NAME
from autogpt.logs import logger from autogpt.logs import logger
from autogpt.memory.vector import MemoryItem, get_memory
cfg = Config() cfg = Config()
def create_chat_message(role, content) -> Message:
"""
Create a chat message with the given role and content.
Args:
role (str): The role of the message sender, e.g., "system", "user", or "assistant".
content (str): The content of the message.
Returns:
dict: A dictionary containing the role and content of the message.
"""
return {"role": role, "content": content}
def generate_context(prompt, relevant_memory, full_message_history, model):
current_context = [
create_chat_message("system", prompt),
create_chat_message(
"system", f"The current time and date is {time.strftime('%c')}"
),
# create_chat_message(
# "system",
# f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
# ),
]
# Add messages from the full message history until we reach the token limit
next_message_to_add_index = len(full_message_history) - 1
insertion_index = len(current_context)
# Count the currently used tokens
current_tokens_used = count_message_tokens(current_context, model)
return (
next_message_to_add_index,
current_tokens_used,
insertion_index,
current_context,
)
# TODO: Change debug from hardcode to argument
def chat_with_ai(
-agent, prompt, user_input, full_message_history, permanent_memory, token_limit
+agent: Agent,
+system_prompt: str,
+user_input: str,
+token_limit: int,
):
-"""Interact with the OpenAI API, sending the prompt, user input, message history,
-and permanent memory."""
-while True:
-try:
-"""
-Interact with the OpenAI API, sending the prompt, user input,
-message history, and permanent memory.
+"""
+Interact with the OpenAI API, sending the prompt, user input,
+message history, and permanent memory.
Args:
-prompt (str): The prompt explaining the rules to the AI.
+system_prompt (str): The prompt explaining the rules to the AI.
user_input (str): The input from the user.
-full_message_history (list): The list of all messages sent between the
-user and the AI.
-permanent_memory (Obj): The memory object containing the permanent
-memory.
token_limit (int): The maximum number of tokens allowed in the API call.
Returns:
str: The AI's response.
"""
model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
# Reserve 1000 tokens for the response
logger.debug(f"Token limit: {token_limit}")
send_token_limit = token_limit - 1000
-# if len(full_message_history) == 0:
+# if len(agent.history) == 0:
#     relevant_memory = ""
# else:
-#     recent_history = full_message_history[-5:]
+#     recent_history = agent.history[-5:]
#     shuffle(recent_history)
-#     relevant_memories = permanent_memory.get_relevant(
+#     relevant_memories = agent.memory.get_relevant(
#         str(recent_history), 5
#     )
#     if relevant_memories:
#         shuffle(relevant_memories)
# relevant_memory = str(relevant_memories)
-relevant_memory = ""
-logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")
+# logger.debug(f"Memory Stats: {agent.memory.get_stats()}")
+relevant_memory = []
-(
-next_message_to_add_index,
-current_tokens_used,
-insertion_index,
-current_context,
-) = generate_context(prompt, relevant_memory, full_message_history, model)
+message_sequence = ChatSequence.for_model(
+model,
+[
+Message("system", system_prompt),
+Message("system", f"The current time and date is {time.strftime('%c')}"),
+# Message(
+#     "system",
+#     f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
+# ),
+],
+)
-# while current_tokens_used > 2500:
-#     # remove memories until we are under 2500 tokens
-#     relevant_memory = relevant_memory[:-1]
-#     (
-#         next_message_to_add_index,
-#         current_tokens_used,
-#         insertion_index,
-#         current_context,
-#     ) = generate_context(
-#         prompt, relevant_memory, full_message_history, model
-#     )
-current_tokens_used += count_message_tokens(
-[create_chat_message("user", user_input)], model
-)  # Account for user input (appended later)
-current_tokens_used += 500  # Account for memory (appended later) TODO: The final memory may be less than 500 tokens
+# Add messages from the full message history until we reach the token limit
+next_message_to_add_index = len(agent.history) - 1
+insertion_index = len(message_sequence)
+# Count the currently used tokens
+current_tokens_used = message_sequence.token_length
+# while current_tokens_used > 2500:
+#     # remove memories until we are under 2500 tokens
+#     relevant_memory = relevant_memory[:-1]
+#     (
+#         next_message_to_add_index,
+#         current_tokens_used,
+#         insertion_index,
+#         current_context,
+#     ) = generate_context(
+#         prompt, relevant_memory, agent.history, model
+#     )
+# Account for user input (appended later)
+user_input_msg = Message("user", user_input)
+current_tokens_used += count_message_tokens([user_input_msg], model)
+current_tokens_used += 500  # Reserve space for new_summary_message
-# Add Messages until the token limit is reached or there are no more messages to add.
-while next_message_to_add_index >= 0:
-# print (f"CURRENT TOKENS USED: {current_tokens_used}")
-message_to_add = full_message_history[next_message_to_add_index]
-tokens_to_add = count_message_tokens([message_to_add], model)
-if current_tokens_used + tokens_to_add > send_token_limit:
-# save_memory_trimmed_from_context_window(
-#     full_message_history,
-#     next_message_to_add_index,
-#     permanent_memory,
-# )
-break
-# Add the most recent message to the start of the current context,
-#  after the two system prompts.
-current_context.insert(
-insertion_index, full_message_history[next_message_to_add_index]
-)
-# Count the currently used tokens
-current_tokens_used += tokens_to_add
-# Move to the next most recent message in the full message history
-next_message_to_add_index -= 1
-from autogpt.memory_management.summary_memory import (
-get_newly_trimmed_messages,
-update_running_summary,
-)
-# Insert Memories
-if len(full_message_history) > 0:
-(
-newly_trimmed_messages,
-agent.last_memory_index,
-) = get_newly_trimmed_messages(
-full_message_history=full_message_history,
-current_context=current_context,
-last_memory_index=agent.last_memory_index,
-)
-agent.summary_memory = update_running_summary(
-agent,
-current_memory=agent.summary_memory,
-new_events=newly_trimmed_messages,
-)
-current_context.insert(insertion_index, agent.summary_memory)
-api_manager = ApiManager()
-# inform the AI about its remaining budget (if it has one)
-if api_manager.get_total_budget() > 0.0:
-remaining_budget = (
-api_manager.get_total_budget() - api_manager.get_total_cost()
-)
-if remaining_budget < 0:
-remaining_budget = 0
-system_message = (
-f"Your remaining API budget is ${remaining_budget:.3f}"
-+ (
-" BUDGET EXCEEDED! SHUT DOWN!\n\n"
-if remaining_budget == 0
-else " Budget very nearly exceeded! Shut down gracefully!\n\n"
-if remaining_budget < 0.005
-else " Budget nearly exceeded. Finish up.\n\n"
-if remaining_budget < 0.01
-else "\n\n"
-)
-)
-logger.debug(system_message)
-current_context.append(create_chat_message("system", system_message))
-# Append user input, the length of this is accounted for above
-current_context.extend([create_chat_message("user", user_input)])
-plugin_count = len(cfg.plugins)
-for i, plugin in enumerate(cfg.plugins):
-if not plugin.can_handle_on_planning():
-continue
-plugin_response = plugin.on_planning(
-agent.config.prompt_generator, current_context
-)
-if not plugin_response or plugin_response == "":
-continue
-tokens_to_add = count_message_tokens(
-[create_chat_message("system", plugin_response)], model
-)
-if current_tokens_used + tokens_to_add > send_token_limit:
-logger.debug("Plugin response too long, skipping:", plugin_response)
-logger.debug("Plugins remaining at stop:", plugin_count - i)
-break
-current_context.append(create_chat_message("system", plugin_response))
-# Calculate remaining tokens
-tokens_remaining = token_limit - current_tokens_used
-# assert tokens_remaining >= 0, "Tokens remaining is negative.
-# This should never happen, please submit a bug report at
-# https://www.github.com/Torantulino/Auto-GPT"
-# Debug print the current context
-logger.debug(f"Token limit: {token_limit}")
-logger.debug(f"Send Token Count: {current_tokens_used}")
-logger.debug(f"Tokens remaining for response: {tokens_remaining}")
-logger.debug("------------ CONTEXT SENT TO AI ---------------")
-for message in current_context:
-# Skip printing the prompt
-if message["role"] == "system" and message["content"] == prompt:
-continue
-logger.debug(f"{message['role'].capitalize()}: {message['content']}")
-logger.debug("")
-logger.debug("----------- END OF CONTEXT ----------------")
-agent.log_cycle_handler.log_cycle(
-agent.config.ai_name,
-agent.created_at,
-agent.cycle_count,
-current_context,
-CURRENT_CONTEXT_FILE_NAME,
-)
-# TODO: use a model defined elsewhere, so that model can contain
-# temperature and other settings we care about
-assistant_reply = create_chat_completion(
-model=model,
-messages=current_context,
-max_tokens=tokens_remaining,
-)
-# Update full message history
-full_message_history.append(create_chat_message("user", user_input))
-full_message_history.append(
-create_chat_message("assistant", assistant_reply)
-)
-return assistant_reply
-except RateLimitError:
-# TODO: When we switch to langchain, this is built in
-logger.warn("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
-time.sleep(10)
+# Add Messages until the token limit is reached or there are no more messages to add.
+for cycle in reversed(list(agent.history.per_cycle())):
+messages_to_add = [msg for msg in cycle if msg is not None]
+tokens_to_add = count_message_tokens(messages_to_add, model)
+if current_tokens_used + tokens_to_add > send_token_limit:
+break
+# Add the most recent message to the start of the chain,
+#  after the system prompts.
+message_sequence.insert(insertion_index, *messages_to_add)
+current_tokens_used += tokens_to_add
+# Update & add summary of trimmed messages
+if len(agent.history) > 0:
+new_summary_message, trimmed_messages = agent.history.trim_messages(
+current_message_chain=list(message_sequence),
+)
+tokens_to_add = count_message_tokens([new_summary_message], model)
+message_sequence.insert(insertion_index, new_summary_message)
+current_tokens_used += tokens_to_add - 500
+# FIXME: uncomment when memory is back in use
+# memory_store = get_memory(cfg)
+# for _, ai_msg, result_msg in agent.history.per_cycle(trimmed_messages):
+#     memory_to_add = MemoryItem.from_ai_action(ai_msg, result_msg)
+#     logger.debug(f"Storing the following memory:\n{memory_to_add.dump()}")
+#     memory_store.add(memory_to_add)
+api_manager = ApiManager()
+# inform the AI about its remaining budget (if it has one)
+if api_manager.get_total_budget() > 0.0:
+remaining_budget = api_manager.get_total_budget() - api_manager.get_total_cost()
+if remaining_budget < 0:
+remaining_budget = 0
+budget_message = f"Your remaining API budget is ${remaining_budget:.3f}" + (
+" BUDGET EXCEEDED! SHUT DOWN!\n\n"
+if remaining_budget == 0
+else " Budget very nearly exceeded! Shut down gracefully!\n\n"
+if remaining_budget < 0.005
+else " Budget nearly exceeded. Finish up.\n\n"
+if remaining_budget < 0.01
+else "\n\n"
+)
+logger.debug(budget_message)
+message_sequence.add("system", budget_message)
+current_tokens_used += count_message_tokens([message_sequence[-1]], model)
+# Append user input, the length of this is accounted for above
+message_sequence.append(user_input_msg)
+plugin_count = len(cfg.plugins)
+for i, plugin in enumerate(cfg.plugins):
+if not plugin.can_handle_on_planning():
+continue
+plugin_response = plugin.on_planning(
+agent.config.prompt_generator, message_sequence.raw()
+)
+if not plugin_response or plugin_response == "":
+continue
+tokens_to_add = count_message_tokens(
+[Message("system", plugin_response)], model
+)
+if current_tokens_used + tokens_to_add > send_token_limit:
+logger.debug(f"Plugin response too long, skipping: {plugin_response}")
+logger.debug(f"Plugins remaining at stop: {plugin_count - i}")
+break
+message_sequence.add("system", plugin_response)
+# Calculate remaining tokens
+tokens_remaining = token_limit - current_tokens_used
+# assert tokens_remaining >= 0, "Tokens remaining is negative.
+# This should never happen, please submit a bug report at
+# https://www.github.com/Torantulino/Auto-GPT"
+# Debug print the current context
+logger.debug(f"Token limit: {token_limit}")
+logger.debug(f"Send Token Count: {current_tokens_used}")
+logger.debug(f"Tokens remaining for response: {tokens_remaining}")
+logger.debug("------------ CONTEXT SENT TO AI ---------------")
+for message in message_sequence:
+# Skip printing the prompt
+if message.role == "system" and message.content == system_prompt:
+continue
+logger.debug(f"{message.role.capitalize()}: {message.content}")
+logger.debug("")
+logger.debug("----------- END OF CONTEXT ----------------")
+agent.log_cycle_handler.log_cycle(
+agent.config.ai_name,
+agent.created_at,
+agent.cycle_count,
+message_sequence.raw(),
+CURRENT_CONTEXT_FILE_NAME,
+)
+# TODO: use a model defined elsewhere, so that model can contain
+# temperature and other settings we care about
+assistant_reply = create_chat_completion(
+prompt=message_sequence,
+max_tokens=tokens_remaining,
+)
+# Update full message history
+agent.history.append(user_input_msg)
+agent.history.add("assistant", assistant_reply, "ai_response")
+return assistant_reply
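The refactored chat_with_ai above fills the context window by walking the agent's history newest-first and stopping as soon as the send budget would be exceeded, leaving older messages to the running summary. A simplified sketch of that budgeting idea (illustrative only; count_tokens is a stand-in for count_message_tokens):

def build_context(history: list, system_msgs: list, token_limit: int, count_tokens) -> list:
    send_limit = token_limit - 1000           # reserve room for the model's reply
    context = list(system_msgs)
    used = sum(count_tokens(m) for m in context)
    insertion_index = len(context)

    for msg in reversed(history):             # newest messages first
        cost = count_tokens(msg)
        if used + cost > send_limit:
            break                             # older messages get summarized instead
        context.insert(insertion_index, msg)  # keep chronological order after the prompts
        used += cost
    return context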

View File

@@ -7,4 +7,5 @@ COSTS = {
"gpt-4-32k": {"prompt": 0.06, "completion": 0.12}, "gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
"gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12}, "gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0}, "text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
"text-davinci-003": {"prompt": 0.02, "completion": 0.02},
} }
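The COSTS table feeds ApiManager's spend tracking; assuming the prices are per 1,000 tokens (as used elsewhere in the codebase), a call's cost can be estimated roughly like this (illustrative sketch, not part of the changeset):

COSTS = {
    "gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
    "text-davinci-003": {"prompt": 0.02, "completion": 0.02},
}

def estimate_cost(model: str, prompt_tokens: int, completion_tokens: int) -> float:
    """Rough cost estimate in USD, assuming per-1K-token pricing."""
    price = COSTS[model]
    return (prompt_tokens * price["prompt"] + completion_tokens * price["completion"]) / 1000

print(estimate_cost("text-davinci-003", 1200, 300))  # ~0.03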

View File

@@ -1,37 +1,74 @@
-from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo
+from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo, TextModelInfo
OPEN_AI_CHAT_MODELS = {
-"gpt-3.5-turbo": ChatModelInfo(
-name="gpt-3.5-turbo",
-prompt_token_cost=0.002,
-completion_token_cost=0.002,
-max_tokens=4096,
-),
-"gpt-4": ChatModelInfo(
-name="gpt-4",
-prompt_token_cost=0.03,
-completion_token_cost=0.06,
-max_tokens=8192,
-),
-"gpt-4-32k": ChatModelInfo(
-name="gpt-4-32k",
-prompt_token_cost=0.06,
-completion_token_cost=0.12,
-max_tokens=32768,
-),
+info.name: info
+for info in [
+ChatModelInfo(
+name="gpt-3.5-turbo",
+prompt_token_cost=0.002,
+completion_token_cost=0.002,
+max_tokens=4096,
+),
+ChatModelInfo(
+name="gpt-3.5-turbo-0301",
+prompt_token_cost=0.002,
+completion_token_cost=0.002,
+max_tokens=4096,
+),
+ChatModelInfo(
+name="gpt-4",
+prompt_token_cost=0.03,
+completion_token_cost=0.06,
+max_tokens=8192,
+),
+ChatModelInfo(
+name="gpt-4-0314",
+prompt_token_cost=0.03,
+completion_token_cost=0.06,
+max_tokens=8192,
+),
+ChatModelInfo(
+name="gpt-4-32k",
+prompt_token_cost=0.06,
+completion_token_cost=0.12,
+max_tokens=32768,
+),
+ChatModelInfo(
+name="gpt-4-32k-0314",
+prompt_token_cost=0.06,
+completion_token_cost=0.12,
+max_tokens=32768,
+),
+]
+}
+OPEN_AI_TEXT_MODELS = {
+info.name: info
+for info in [
+TextModelInfo(
+name="text-davinci-003",
+prompt_token_cost=0.02,
+completion_token_cost=0.02,
+max_tokens=4097,
+),
+]
}
OPEN_AI_EMBEDDING_MODELS = {
-"text-embedding-ada-002": EmbeddingModelInfo(
-name="text-embedding-ada-002",
-prompt_token_cost=0.0004,
-completion_token_cost=0.0,
-max_tokens=8191,
-embedding_dimensions=1536,
-),
+info.name: info
+for info in [
+EmbeddingModelInfo(
+name="text-embedding-ada-002",
+prompt_token_cost=0.0004,
+completion_token_cost=0.0,
+max_tokens=8191,
+embedding_dimensions=1536,
+),
+]
}
-OPEN_AI_MODELS = {
+OPEN_AI_MODELS: dict[str, ChatModelInfo | EmbeddingModelInfo | TextModelInfo] = {
**OPEN_AI_CHAT_MODELS,
+**OPEN_AI_TEXT_MODELS,
**OPEN_AI_EMBEDDING_MODELS,
}
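Keying the registries by info.name makes model metadata a plain dictionary lookup; a short usage sketch (the module path is assumed here, not shown in the diff):

from autogpt.llm.providers.openai import OPEN_AI_MODELS  # assumed import path

info = OPEN_AI_MODELS["gpt-4"]
print(info.max_tokens)         # 8192
print(info.prompt_token_cost)  # 0.03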

View File

@@ -2,20 +2,58 @@ from __future__ import annotations
import functools
import time
-from itertools import islice
from typing import List, Literal, Optional
+from unittest.mock import patch
-import numpy as np
import openai
-import tiktoken
+import openai.api_resources.abstract.engine_api_resource as engine_api_resource
+import openai.util
from colorama import Fore, Style
-from openai.error import APIError, RateLimitError, Timeout
+from openai.error import APIError, RateLimitError
+from openai.openai_object import OpenAIObject
from autogpt.config import Config
-from autogpt.llm.api_manager import ApiManager
-from autogpt.llm.base import Message
from autogpt.logs import logger
+from ..api_manager import ApiManager
+from ..base import ChatSequence, Message
+from .token_counter import *
def metered(func):
"""Adds ApiManager metering to functions which make OpenAI API calls"""
api_manager = ApiManager()
openai_obj_processor = openai.util.convert_to_openai_object
def update_usage_with_response(response: OpenAIObject):
try:
usage = response.usage
logger.debug(f"Reported usage from call to model {response.model}: {usage}")
api_manager.update_cost(
response.usage.prompt_tokens,
response.usage.completion_tokens if "completion_tokens" in usage else 0,
response.model,
)
except Exception as err:
logger.warn(f"Failed to update API costs: {err.__class__.__name__}: {err}")
def metering_wrapper(*args, **kwargs):
openai_obj = openai_obj_processor(*args, **kwargs)
if isinstance(openai_obj, OpenAIObject) and "usage" in openai_obj:
update_usage_with_response(openai_obj)
return openai_obj
def metered_func(*args, **kwargs):
with patch.object(
engine_api_resource.util,
"convert_to_openai_object",
side_effect=metering_wrapper,
):
return func(*args, **kwargs)
return metered_func
def retry_openai_api(
num_retries: int = 10,
@@ -93,23 +131,57 @@ def call_ai_function(
# For each arg, if any are None, convert to "None":
args = [str(arg) if arg is not None else "None" for arg in args]
# parse args to comma separated string
-args: str = ", ".join(args)
-messages: List[Message] = [
-{
-"role": "system",
-"content": f"You are now the following python function: ```# {description}"
-f"\n{function}```\n\nOnly respond with your `return` value.",
-},
-{"role": "user", "content": args},
-]
-return create_chat_completion(model=model, messages=messages, temperature=0)
+arg_str: str = ", ".join(args)
+prompt = ChatSequence.for_model(
+model,
+[
+Message(
+"system",
+f"You are now the following python function: ```# {description}"
+f"\n{function}```\n\nOnly respond with your `return` value.",
+),
+Message("user", arg_str),
+],
+)
+return create_chat_completion(prompt=prompt, temperature=0)
@metered
@retry_openai_api()
def create_text_completion(
prompt: str,
model: Optional[str],
temperature: Optional[float],
max_output_tokens: Optional[int],
) -> str:
cfg = Config()
if model is None:
model = cfg.fast_llm_model
if temperature is None:
temperature = cfg.temperature
if cfg.use_azure:
kwargs = {"deployment_id": cfg.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}
response = openai.Completion.create(
**kwargs,
prompt=prompt,
temperature=temperature,
max_tokens=max_output_tokens,
api_key=cfg.openai_api_key,
)
return response.choices[0].text
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
+@metered
+@retry_openai_api()
def create_chat_completion(
-messages: List[Message],  # type: ignore
+prompt: ChatSequence,
model: Optional[str] = None,
temperature: float = None,
max_tokens: Optional[int] = None,
@@ -126,23 +198,23 @@ def create_chat_completion(
str: The response from the chat completion
"""
cfg = Config()
+if model is None:
+model = prompt.model.name
if temperature is None:
temperature = cfg.temperature
-num_retries = 10
-warned_user = False
logger.debug(
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
)
for plugin in cfg.plugins:
if plugin.can_handle_chat_completion(
-messages=messages,
+messages=prompt.raw(),
model=model,
temperature=temperature,
max_tokens=max_tokens,
):
message = plugin.handle_chat_completion(
-messages=messages,
+messages=prompt.raw(),
model=model,
temperature=temperature,
max_tokens=max_tokens,
@@ -151,57 +223,19 @@ def create_chat_completion(
return message
api_manager = ApiManager()
response = None
-for attempt in range(num_retries):
-backoff = 2 ** (attempt + 2)
-try:
-if cfg.use_azure:
-response = api_manager.create_chat_completion(
-deployment_id=cfg.get_azure_deployment_id_for_model(model),
-model=model,
-messages=messages,
-temperature=temperature,
-max_tokens=max_tokens,
-)
-else:
-response = api_manager.create_chat_completion(
-model=model,
-messages=messages,
-temperature=temperature,
-max_tokens=max_tokens,
-)
-break
-except RateLimitError:
-logger.debug(
-f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
-)
-if not warned_user:
-logger.double_check(
-f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
-+ f"You can read more here: {Fore.CYAN}https://docs.agpt.co/setup/#getting-an-api-key{Fore.RESET}"
-)
-warned_user = True
-except (APIError, Timeout) as e:
-if e.http_status != 502:
-raise
-if attempt == num_retries - 1:
-raise
-logger.debug(
-f"{Fore.RED}Error: ",
-f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
-)
-time.sleep(backoff)
-if response is None:
-logger.typewriter_log(
-"FAILED TO GET RESPONSE FROM OPENAI",
-Fore.RED,
-"Auto-GPT has failed to get a response from OpenAI's services. "
-+ f"Try running Auto-GPT again, and if the problem the persists try running it with `{Fore.CYAN}--debug{Fore.RESET}`.",
-)
-logger.double_check()
-if cfg.debug_mode:
-raise RuntimeError(f"Failed to get response after {num_retries} retries")
-else:
-quit(1)
+if cfg.use_azure:
+kwargs = {"deployment_id": cfg.get_azure_deployment_id_for_model(model)}
+else:
+kwargs = {"model": model}
+response = api_manager.create_chat_completion(
+**kwargs,
+messages=prompt.raw(),
+temperature=temperature,
+max_tokens=max_tokens,
+)
resp = response.choices[0].message["content"]
for plugin in cfg.plugins:
if not plugin.can_handle_on_response():
@@ -210,91 +244,6 @@ def create_chat_completion(
return resp
def batched(iterable, n):
"""Batch data into tuples of length n. The last batch may be shorter."""
# batched('ABCDEFG', 3) --> ABC DEF G
if n < 1:
raise ValueError("n must be at least one")
it = iter(iterable)
while batch := tuple(islice(it, n)):
yield batch
def chunked_tokens(text, tokenizer_name, chunk_length):
tokenizer = tiktoken.get_encoding(tokenizer_name)
tokens = tokenizer.encode(text)
chunks_iterator = batched(tokens, chunk_length)
yield from chunks_iterator
def get_ada_embedding(text: str) -> List[float]:
"""Get an embedding from the ada model.
Args:
text (str): The text to embed.
Returns:
List[float]: The embedding.
"""
cfg = Config()
model = cfg.embedding_model
text = text.replace("\n", " ")
if cfg.use_azure:
kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}
embedding = create_embedding(text, **kwargs)
return embedding
@retry_openai_api()
def create_embedding(
text: str,
*_,
**kwargs,
) -> openai.Embedding:
"""Create an embedding using the OpenAI API
Args:
text (str): The text to embed.
kwargs: Other arguments to pass to the OpenAI API embedding creation call.
Returns:
openai.Embedding: The embedding object.
"""
cfg = Config()
chunk_embeddings = []
chunk_lengths = []
for chunk in chunked_tokens(
text,
tokenizer_name=cfg.embedding_tokenizer,
chunk_length=cfg.embedding_token_limit,
):
embedding = openai.Embedding.create(
input=[chunk],
api_key=cfg.openai_api_key,
**kwargs,
)
api_manager = ApiManager()
api_manager.update_cost(
prompt_tokens=embedding.usage.prompt_tokens,
completion_tokens=0,
model=cfg.embedding_model,
)
chunk_embeddings.append(embedding["data"][0]["embedding"])
chunk_lengths.append(len(chunk))
# do weighted avg
chunk_embeddings = np.average(chunk_embeddings, axis=0, weights=chunk_lengths)
chunk_embeddings = chunk_embeddings / np.linalg.norm(
chunk_embeddings
) # normalize the length to one
chunk_embeddings = chunk_embeddings.tolist()
return chunk_embeddings
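The removed create_embedding helper handled long inputs by embedding token chunks and combining them with a length-weighted average followed by re-normalization; the combining step in isolation looks like this (toy numbers, for reference only):

import numpy as np

chunk_embeddings = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
chunk_lengths = [300, 100]                      # tokens per chunk act as weights

combined = np.average(chunk_embeddings, axis=0, weights=chunk_lengths)
combined = combined / np.linalg.norm(combined)  # normalize back to unit length
print(combined)                                 # ~[0.9487, 0.3162]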
def check_model(
model_name: str, model_type: Literal["smart_llm_model", "fast_llm_model"]
) -> str:

View File

@@ -53,7 +53,7 @@ def count_message_tokens(
num_tokens = 0
for message in messages:
num_tokens += tokens_per_message
-for key, value in message.items():
+for key, value in message.raw().items():
num_tokens += len(encoding.encode(value))
if key == "name":
num_tokens += tokens_per_name
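After this change count_message_tokens iterates Message objects and reads their fields via .raw(); a minimal sketch of the call (import paths taken from the new chat.py imports, exact defaults may differ):

from autogpt.llm.base import Message
from autogpt.llm.utils import count_message_tokens

messages = [
    Message("system", "You are an agent."),
    Message("user", "What time is it?"),
]
n = count_message_tokens(messages, "gpt-3.5-turbo")
print(f"{n} prompt tokens will be used")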

View File

@@ -5,12 +5,12 @@ from pathlib import Path
from colorama import Fore, Style
-from autogpt.agent.agent import Agent
+from autogpt.agent import Agent
from autogpt.commands.command import CommandRegistry
from autogpt.config import Config, check_openai_api_key
from autogpt.configurator import create_config
from autogpt.logs import logger
-from autogpt.memory import get_memory
+from autogpt.memory.vector import get_memory
from autogpt.plugins import scan_plugins
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT, construct_main_ai_config
from autogpt.utils import (
@@ -160,7 +160,6 @@ def run_auto_gpt(
ai_name = ai_config.ai_name
# print(prompt)
# Initialize variables
-full_message_history = []
next_action_count = 0
# add chat plugins capable of report to logger
@@ -184,7 +183,6 @@ def run_auto_gpt(
agent = Agent(
ai_name=ai_name,
memory=memory,
-full_message_history=full_message_history,
next_action_count=next_action_count,
command_registry=command_registry,
config=ai_config,

View File

@@ -1,96 +0,0 @@
from autogpt.logs import logger
from autogpt.memory.local import LocalCache
from autogpt.memory.no_memory import NoMemory
# List of supported memory backends
# Add a backend to this list if the import attempt is successful
supported_memory = ["local", "no_memory"]
try:
from autogpt.memory.redismem import RedisMemory
supported_memory.append("redis")
except ImportError:
RedisMemory = None
try:
from autogpt.memory.pinecone import PineconeMemory
supported_memory.append("pinecone")
except ImportError:
PineconeMemory = None
try:
from autogpt.memory.weaviate import WeaviateMemory
supported_memory.append("weaviate")
except ImportError:
WeaviateMemory = None
try:
from autogpt.memory.milvus import MilvusMemory
supported_memory.append("milvus")
except ImportError:
MilvusMemory = None
def get_memory(cfg, init=False):
memory = None
if cfg.memory_backend == "pinecone":
if not PineconeMemory:
logger.warn(
"Error: Pinecone is not installed. Please install pinecone"
" to use Pinecone as a memory backend."
)
else:
memory = PineconeMemory(cfg)
if init:
memory.clear()
elif cfg.memory_backend == "redis":
if not RedisMemory:
logger.warn(
"Error: Redis is not installed. Please install redis-py to"
" use Redis as a memory backend."
)
else:
memory = RedisMemory(cfg)
elif cfg.memory_backend == "weaviate":
if not WeaviateMemory:
logger.warn(
"Error: Weaviate is not installed. Please install weaviate-client to"
" use Weaviate as a memory backend."
)
else:
memory = WeaviateMemory(cfg)
elif cfg.memory_backend == "milvus":
if not MilvusMemory:
logger.warn(
"Error: pymilvus sdk is not installed."
"Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
)
else:
memory = MilvusMemory(cfg)
elif cfg.memory_backend == "no_memory":
memory = NoMemory(cfg)
if memory is None:
memory = LocalCache(cfg)
if init:
memory.clear()
return memory
def get_supported_memory_backends():
return supported_memory
__all__ = [
"get_memory",
"LocalCache",
"RedisMemory",
"PineconeMemory",
"NoMemory",
"MilvusMemory",
"WeaviateMemory",
]

View File

@@ -1,31 +0,0 @@
"""Base class for memory providers."""
import abc
from autogpt.singleton import AbstractSingleton
class MemoryProviderSingleton(AbstractSingleton):
@abc.abstractmethod
def add(self, data):
"""Adds to memory"""
pass
@abc.abstractmethod
def get(self, data):
"""Gets from memory"""
pass
@abc.abstractmethod
def clear(self):
"""Clears memory"""
pass
@abc.abstractmethod
def get_relevant(self, data, num_relevant=5):
"""Gets relevant memory for"""
pass
@abc.abstractmethod
def get_stats(self):
"""Get stats from memory"""
pass

View File

@@ -1,126 +0,0 @@
from __future__ import annotations
import dataclasses
from pathlib import Path
from typing import Any, List
import numpy as np
import orjson
from autogpt.llm import get_ada_embedding
from autogpt.memory.base import MemoryProviderSingleton
EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
def create_default_embeddings():
return np.zeros((0, EMBED_DIM)).astype(np.float32)
@dataclasses.dataclass
class CacheContent:
texts: List[str] = dataclasses.field(default_factory=list)
embeddings: np.ndarray = dataclasses.field(
default_factory=create_default_embeddings
)
class LocalCache(MemoryProviderSingleton):
"""A class that stores the memory in a local file"""
def __init__(self, cfg) -> None:
"""Initialize a class instance
Args:
cfg: Config object
Returns:
None
"""
workspace_path = Path(cfg.workspace_path)
self.filename = workspace_path / f"{cfg.memory_index}.json"
self.filename.touch(exist_ok=True)
file_content = b"{}"
with self.filename.open("w+b") as f:
f.write(file_content)
self.data = CacheContent()
def add(self, text: str):
"""
Add text to our list of texts, add embedding as row to our
embeddings-matrix
Args:
text: str
Returns: None
"""
if "Command Error:" in text:
return ""
self.data.texts.append(text)
embedding = get_ada_embedding(text)
vector = np.array(embedding).astype(np.float32)
vector = vector[np.newaxis, :]
self.data.embeddings = np.concatenate(
[
self.data.embeddings,
vector,
],
axis=0,
)
with open(self.filename, "wb") as f:
out = orjson.dumps(self.data, option=SAVE_OPTIONS)
f.write(out)
return text
def clear(self) -> str:
"""
Clears the data in memory.
Returns: A message indicating that the memory has been cleared.
"""
self.data = CacheContent()
return "Obliviated"
def get(self, data: str) -> list[Any] | None:
"""
Gets the data from the memory that is most relevant to the given data.
Args:
data: The data to compare to.
Returns: The most relevant data.
"""
return self.get_relevant(data, 1)
def get_relevant(self, text: str, k: int) -> list[Any]:
""" "
matrix-vector mult to find score-for-each-row-of-matrix
get indices for top-k winning scores
return texts for those indices
Args:
text: str
k: int
Returns: List[str]
"""
embedding = get_ada_embedding(text)
scores = np.dot(self.data.embeddings, embedding)
top_k_indices = np.argsort(scores)[-k:][::-1]
return [self.data.texts[i] for i in top_k_indices]
def get_stats(self) -> tuple[int, tuple[int, ...]]:
"""
Returns: The stats of the local cache.
"""
return len(self.data.texts), self.data.embeddings.shape

View File

@@ -0,0 +1,204 @@
from __future__ import annotations
import copy
import json
from dataclasses import dataclass, field
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from autogpt.agent import Agent
from autogpt.config import Config
from autogpt.json_utils.utilities import (
LLM_DEFAULT_RESPONSE_FORMAT,
is_string_valid_json,
)
from autogpt.llm.base import ChatSequence, Message, MessageRole, MessageType
from autogpt.llm.utils import create_chat_completion
from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
from autogpt.logs import logger
@dataclass
class MessageHistory:
agent: Agent
messages: list[Message] = field(default_factory=list)
summary: str = "I was created"
last_trimmed_index: int = 0
def __getitem__(self, i: int):
return self.messages[i]
def __iter__(self):
return iter(self.messages)
def __len__(self):
return len(self.messages)
def add(
self,
role: MessageRole,
content: str,
type: MessageType | None = None,
):
return self.append(Message(role, content, type))
def append(self, message: Message):
return self.messages.append(message)
def trim_messages(
self,
current_message_chain: list[Message],
) -> tuple[Message, list[Message]]:
"""
Returns a list of trimmed messages: messages which are in the message history
but not in current_message_chain.
Args:
current_message_chain (list[Message]): The messages currently in the context.
Returns:
Message: A message with the new running summary after adding the trimmed messages.
list[Message]: A list of messages that are in full_message_history with an index higher than last_trimmed_index and absent from current_message_chain.
"""
# Select messages in full_message_history with an index higher than last_trimmed_index
new_messages = [
msg for i, msg in enumerate(self) if i > self.last_trimmed_index
]
# Remove messages that are already present in current_message_chain
new_messages_not_in_chain = [
msg for msg in new_messages if msg not in current_message_chain
]
if not new_messages_not_in_chain:
return self.summary_message(), []
new_summary_message = self.update_running_summary(
new_events=new_messages_not_in_chain
)
# Find the index of the last message processed
last_message = new_messages_not_in_chain[-1]
self.last_trimmed_index = self.messages.index(last_message)
return new_summary_message, new_messages_not_in_chain
def per_cycle(self, messages: list[Message] | None = None):
"""
Yields:
Message: a message containing user input
Message: a message from the AI containing a proposed action
Message: the message containing the result of the AI's proposed action
"""
messages = messages or self.messages
for i in range(0, len(messages) - 1):
ai_message = messages[i]
if ai_message.type != "ai_response":
continue
user_message = (
messages[i - 1] if i > 0 and messages[i - 1].role == "user" else None
)
result_message = messages[i + 1]
try:
assert is_string_valid_json(
ai_message.content, LLM_DEFAULT_RESPONSE_FORMAT
), "AI response is not a valid JSON object"
assert result_message.type == "action_result"
yield user_message, ai_message, result_message
except AssertionError as err:
logger.debug(
f"Invalid item in message history: {err}; Messages: {messages[i-1:i+2]}"
)
def summary_message(self) -> Message:
return Message(
"system",
f"This reminds you of these events from your past: \n{self.summary}",
)
def update_running_summary(self, new_events: list[Message]) -> Message:
"""
This function takes a list of dictionaries representing new events and combines them with the current summary,
focusing on key and potentially important information to remember. The updated summary is returned in a message
formatted in the 1st person past tense.
Args:
new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary.
Returns:
str: A message containing the updated summary of actions, formatted in the 1st person past tense.
Example:
new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}]
update_running_summary(new_events)
# Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
"""
cfg = Config()
if not new_events:
return self.summary_message()
# Create a copy of the new_events list to prevent modifying the original list
new_events = copy.deepcopy(new_events)
# Replace "assistant" with "you". This produces much better first person past tense results.
for event in new_events:
if event.role.lower() == "assistant":
event.role = "you"
# Remove "thoughts" dictionary from "content"
try:
content_dict = json.loads(event.content)
if "thoughts" in content_dict:
del content_dict["thoughts"]
event.content = json.dumps(content_dict)
except json.decoder.JSONDecodeError:
if cfg.debug_mode:
logger.error(f"Error: Invalid JSON: {event.content}\n")
elif event.role.lower() == "system":
event.role = "your computer"
# Delete all user messages
elif event.role == "user":
new_events.remove(event)
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
You will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
Summary So Far:
"""
{self.summary}
"""
Latest Development:
"""
{new_events or "Nothing new happened."}
"""
'''
prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
self.agent.log_cycle_handler.log_cycle(
self.agent.config.ai_name,
self.agent.created_at,
self.agent.cycle_count,
prompt.raw(),
PROMPT_SUMMARY_FILE_NAME,
)
self.summary = create_chat_completion(prompt)
self.agent.log_cycle_handler.log_cycle(
self.agent.config.ai_name,
self.agent.created_at,
self.agent.cycle_count,
self.summary,
SUMMARY_FILE_NAME,
)
return self.summary_message()
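A short sketch of how the new MessageHistory is meant to be driven, based on the methods above (hypothetical usage; in the real code the history lives on the Agent and trim_messages triggers an LLM call to refresh the summary):

# `agent` is assumed to exist; MessageHistory is a dataclass bound to it
history = MessageHistory(agent)

history.add("user", "Determine the next command to use")
history.add("assistant", '{"command": {"name": "list_files", "args": {"directory": "."}}}', "ai_response")
history.add("system", "Command list_files returned: ['README.md']", "action_result")

# Messages not yet present in the context window get folded into the running summary
new_summary, trimmed = history.trim_messages(current_message_chain=[])
print(new_summary.content)  # "This reminds you of these events from your past: ..."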

View File

@@ -1,162 +0,0 @@
""" Milvus memory storage provider."""
import re
from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections
from autogpt.config import Config
from autogpt.llm import get_ada_embedding
from autogpt.memory.base import MemoryProviderSingleton
class MilvusMemory(MemoryProviderSingleton):
"""Milvus memory storage provider."""
def __init__(self, cfg: Config) -> None:
"""Construct a milvus memory storage connection.
Args:
cfg (Config): Auto-GPT global config.
"""
self.configure(cfg)
connect_kwargs = {}
if self.username:
connect_kwargs["user"] = self.username
connect_kwargs["password"] = self.password
connections.connect(
**connect_kwargs,
uri=self.uri or "",
address=self.address or "",
secure=self.secure,
)
self.init_collection()
def configure(self, cfg: Config) -> None:
# init with configuration.
self.uri = None
self.address = cfg.milvus_addr
self.secure = cfg.milvus_secure
self.username = cfg.milvus_username
self.password = cfg.milvus_password
self.collection_name = cfg.milvus_collection
# use HNSW by default.
self.index_params = {
"metric_type": "IP",
"index_type": "HNSW",
"params": {"M": 8, "efConstruction": 64},
}
if (self.username is None) != (self.password is None):
raise ValueError(
"Both username and password must be set to use authentication for Milvus"
)
# configured address may be a full URL.
if re.match(r"^(https?|tcp)://", self.address) is not None:
self.uri = self.address
self.address = None
if self.uri.startswith("https"):
self.secure = True
# Zilliz Cloud requires AutoIndex.
if re.match(r"^https://(.*)\.zillizcloud\.(com|cn)", self.uri) is not None:
self.index_params = {
"metric_type": "IP",
"index_type": "AUTOINDEX",
"params": {},
}
def init_collection(self) -> None:
"""Initialize collection in vector database."""
fields = [
FieldSchema(name="pk", dtype=DataType.INT64, is_primary=True, auto_id=True),
FieldSchema(name="embeddings", dtype=DataType.FLOAT_VECTOR, dim=1536),
FieldSchema(name="raw_text", dtype=DataType.VARCHAR, max_length=65535),
]
# create collection if not exist and load it.
self.schema = CollectionSchema(fields, "auto-gpt memory storage")
self.collection = Collection(self.collection_name, self.schema)
# create index if not exist.
if not self.collection.has_index():
self.collection.release()
self.collection.create_index(
"embeddings",
self.index_params,
index_name="embeddings",
)
self.collection.load()
def add(self, data) -> str:
"""Add an embedding of data into memory.
Args:
data (str): The raw text to construct embedding index.
Returns:
str: log.
"""
embedding = get_ada_embedding(data)
result = self.collection.insert([[embedding], [data]])
_text = (
"Inserting data into memory at primary key: "
f"{result.primary_keys[0]}:\n data: {data}"
)
return _text
def get(self, data):
"""Return the most relevant data in memory.
Args:
data: The data to compare to.
"""
return self.get_relevant(data, 1)
def clear(self) -> str:
"""Drop the index in memory.
Returns:
str: log.
"""
self.collection.drop()
self.collection = Collection(self.collection_name, self.schema)
self.collection.create_index(
"embeddings",
self.index_params,
index_name="embeddings",
)
self.collection.load()
return "Obliviated"
def get_relevant(self, data: str, num_relevant: int = 5):
"""Return the top-k relevant data in memory.
Args:
data: The data to compare to.
num_relevant (int, optional): The max number of relevant data.
Defaults to 5.
Returns:
list: The top-k relevant data.
"""
# search the embedding and return the most relevant text.
embedding = get_ada_embedding(data)
search_params = {
"metrics_type": "IP",
"params": {"nprobe": 8},
}
result = self.collection.search(
[embedding],
"embeddings",
search_params,
num_relevant,
output_fields=["raw_text"],
)
return [item.entity.value_of_field("raw_text") for item in result[0]]
def get_stats(self) -> str:
"""
Returns: The stats of the milvus cache.
"""
return f"Entities num: {self.collection.num_entities}"

View File

@@ -1,73 +0,0 @@
"""A class that does not store any data. This is the default memory provider."""
from __future__ import annotations
from typing import Any
from autogpt.memory.base import MemoryProviderSingleton
class NoMemory(MemoryProviderSingleton):
"""
A class that does not store any data. This is the default memory provider.
"""
def __init__(self, cfg):
"""
Initializes the NoMemory provider.
Args:
cfg: The config object.
Returns: None
"""
pass
def add(self, data: str) -> str:
"""
Adds a data point to the memory. No action is taken in NoMemory.
Args:
data: The data to add.
Returns: An empty string.
"""
return ""
def get(self, data: str) -> list[Any] | None:
"""
Gets the data from the memory that is most relevant to the given data.
NoMemory always returns None.
Args:
data: The data to compare to.
Returns: None
"""
return None
def clear(self) -> str:
"""
Clears the memory. No action is taken in NoMemory.
Returns: An empty string.
"""
return ""
def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
"""
Returns all the data in the memory that is relevant to the given data.
NoMemory always returns None.
Args:
data: The data to compare to.
num_relevant: The number of relevant data to return.
Returns: None
"""
return None
def get_stats(self):
"""
Returns: An empty dictionary as there are no stats in NoMemory.
"""
return {}

View File

@@ -1,78 +0,0 @@
import pinecone
from colorama import Fore, Style
from autogpt.llm import get_ada_embedding
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
class PineconeMemory(MemoryProviderSingleton):
def __init__(self, cfg):
pinecone_api_key = cfg.pinecone_api_key
pinecone_region = cfg.pinecone_region
pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
dimension = 1536
metric = "cosine"
pod_type = "p1"
table_name = "auto-gpt"
# this assumes we don't start with memory.
# for now this works.
# we'll need a more complicated and robust system if we want to start with
# memory.
self.vec_num = 0
try:
pinecone.whoami()
except Exception as e:
logger.typewriter_log(
"FAILED TO CONNECT TO PINECONE",
Fore.RED,
Style.BRIGHT + str(e) + Style.RESET_ALL,
)
logger.double_check(
"Please ensure you have setup and configured Pinecone properly for use."
+ f"You can check out {Fore.CYAN + Style.BRIGHT}"
"https://docs.agpt.co/configuration/memory/#pinecone-api-key-setup"
f"{Style.RESET_ALL} to ensure you've set up everything correctly."
)
exit(1)
if table_name not in pinecone.list_indexes():
logger.typewriter_log(
"Connecting Pinecone. This may take some time...", Fore.MAGENTA, ""
)
pinecone.create_index(
table_name, dimension=dimension, metric=metric, pod_type=pod_type
)
self.index = pinecone.Index(table_name)
def add(self, data):
vector = get_ada_embedding(data)
# no metadata here. We may wish to change that long term.
self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
_text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
self.vec_num += 1
return _text
def get(self, data):
return self.get_relevant(data, 1)
def clear(self):
self.index.delete(deleteAll=True)
return "Obliviated"
def get_relevant(self, data, num_relevant=5):
"""
Returns all the data in the memory that is relevant to the given data.
:param data: The data to compare to.
:param num_relevant: The number of relevant data to return. Defaults to 5
"""
query_embedding = get_ada_embedding(data)
results = self.index.query(
query_embedding, top_k=num_relevant, include_metadata=True
)
sorted_results = sorted(results.matches, key=lambda x: x.score)
return [str(item["metadata"]["raw_text"]) for item in sorted_results]
def get_stats(self):
return self.index.describe_index_stats()

View File

@@ -1,156 +0,0 @@
"""Redis memory provider."""
from __future__ import annotations
from typing import Any
import numpy as np
import redis
from colorama import Fore, Style
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query
from autogpt.llm import get_ada_embedding
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
SCHEMA = [
TextField("data"),
VectorField(
"embedding",
"HNSW",
{"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"},
),
]
class RedisMemory(MemoryProviderSingleton):
def __init__(self, cfg):
"""
Initializes the Redis memory provider.
Args:
cfg: The config object.
Returns: None
"""
redis_host = cfg.redis_host
redis_port = cfg.redis_port
redis_password = cfg.redis_password
self.dimension = 1536
self.redis = redis.Redis(
host=redis_host,
port=redis_port,
password=redis_password,
db=0, # Cannot be changed
)
self.cfg = cfg
# Check redis connection
try:
self.redis.ping()
except redis.ConnectionError as e:
logger.typewriter_log(
"FAILED TO CONNECT TO REDIS",
Fore.RED,
Style.BRIGHT + str(e) + Style.RESET_ALL,
)
logger.double_check(
"Please ensure you have setup and configured Redis properly for use. "
+ f"You can check out {Fore.CYAN + Style.BRIGHT}"
f"https://docs.agpt.co/configuration/memory/#redis-setup{Style.RESET_ALL}"
" to ensure you've set up everything correctly."
)
exit(1)
if cfg.wipe_redis_on_start:
self.redis.flushall()
try:
self.redis.ft(f"{cfg.memory_index}").create_index(
fields=SCHEMA,
definition=IndexDefinition(
prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH
),
)
except Exception as e:
logger.warn("Error creating Redis search index: ", e)
existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0
def add(self, data: str) -> str:
"""
Adds a data point to the memory.
Args:
data: The data to add.
Returns: Message indicating that the data has been added.
"""
if "Command Error:" in data:
return ""
vector = get_ada_embedding(data)
vector = np.array(vector).astype(np.float32).tobytes()
data_dict = {b"data": data, "embedding": vector}
pipe = self.redis.pipeline()
pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
_text = (
f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}"
)
self.vec_num += 1
pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num)
pipe.execute()
return _text
def get(self, data: str) -> list[Any] | None:
"""
Gets the data from the memory that is most relevant to the given data.
Args:
data: The data to compare to.
Returns: The most relevant data.
"""
return self.get_relevant(data, 1)
def clear(self) -> str:
"""
Clears the redis server.
Returns: A message indicating that the memory has been cleared.
"""
self.redis.flushall()
return "Obliviated"
def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
"""
Returns all the data in the memory that is relevant to the given data.
Args:
data: The data to compare to.
num_relevant: The number of relevant data to return.
Returns: A list of the most relevant data.
"""
query_embedding = get_ada_embedding(data)
base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
query = (
Query(base_query)
.return_fields("data", "vector_score")
.sort_by("vector_score")
.dialect(2)
)
query_vector = np.array(query_embedding).astype(np.float32).tobytes()
try:
results = self.redis.ft(f"{self.cfg.memory_index}").search(
query, query_params={"vector": query_vector}
)
except Exception as e:
logger.warn("Error calling Redis search: ", e)
return None
return [result.data for result in results.docs]
def get_stats(self):
"""
Returns: The stats of the memory index.
"""
return self.redis.ft(f"{self.cfg.memory_index}").info()

View File

@@ -0,0 +1,138 @@
from autogpt.config import Config
from autogpt.logs import logger
from .memory_item import MemoryItem, MemoryItemRelevance
from .providers.base import VectorMemoryProvider as VectorMemory
from .providers.json_file import JSONFileMemory
from .providers.no_memory import NoMemory
# List of supported memory backends
# Add a backend to this list if the import attempt is successful
supported_memory = ["json_file", "no_memory"]
# try:
# from .providers.redis import RedisMemory
# supported_memory.append("redis")
# except ImportError:
# RedisMemory = None
# try:
# from .providers.pinecone import PineconeMemory
# supported_memory.append("pinecone")
# except ImportError:
# PineconeMemory = None
# try:
# from .providers.weaviate import WeaviateMemory
# supported_memory.append("weaviate")
# except ImportError:
# WeaviateMemory = None
# try:
# from .providers.milvus import MilvusMemory
# supported_memory.append("milvus")
# except ImportError:
# MilvusMemory = None
def get_memory(cfg: Config, init=False) -> VectorMemory:
memory = None
match cfg.memory_backend:
case "json_file":
memory = JSONFileMemory(cfg)
case "pinecone":
raise NotImplementedError(
"The Pinecone memory backend has been rendered incompatible by work on "
"the memory system, and was removed. Whether support will be added back "
"in the future is subject to discussion, feel free to pitch in: "
"https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
)
# if not PineconeMemory:
# logger.warn(
# "Error: Pinecone is not installed. Please install pinecone"
# " to use Pinecone as a memory backend."
# )
# else:
# memory = PineconeMemory(cfg)
# if init:
# memory.clear()
case "redis":
raise NotImplementedError(
"The Redis memory backend has been rendered incompatible by work on "
"the memory system, and has been removed temporarily."
)
# if not RedisMemory:
# logger.warn(
# "Error: Redis is not installed. Please install redis-py to"
# " use Redis as a memory backend."
# )
# else:
# memory = RedisMemory(cfg)
case "weaviate":
raise NotImplementedError(
"The Weaviate memory backend has been rendered incompatible by work on "
"the memory system, and was removed. Whether support will be added back "
"in the future is subject to discussion, feel free to pitch in: "
"https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
)
# if not WeaviateMemory:
# logger.warn(
# "Error: Weaviate is not installed. Please install weaviate-client to"
# " use Weaviate as a memory backend."
# )
# else:
# memory = WeaviateMemory(cfg)
case "milvus":
raise NotImplementedError(
"The Milvus memory backend has been rendered incompatible by work on "
"the memory system, and was removed. Whether support will be added back "
"in the future is subject to discussion, feel free to pitch in: "
"https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280"
)
# if not MilvusMemory:
# logger.warn(
# "Error: pymilvus sdk is not installed."
# "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
# )
# else:
# memory = MilvusMemory(cfg)
case "no_memory":
memory = NoMemory()
case _:
raise ValueError(
f"Unknown memory backend '{cfg.memory_backend}'. Please check your config."
)
if memory is None:
memory = JSONFileMemory(cfg)
return memory
def get_supported_memory_backends():
return supported_memory
__all__ = [
"get_memory",
"MemoryItem",
"MemoryItemRelevance",
"JSONFileMemory",
"NoMemory",
"VectorMemory",
# "RedisMemory",
# "PineconeMemory",
# "MilvusMemory",
# "WeaviateMemory",
]
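With the trimmed-down backend list, selecting and filling the vector memory looks roughly like this (sketch only; config attributes and return types as shown above, and both from_text_file and get trigger embedding calls):

from autogpt.config import Config
from autogpt.memory.vector import MemoryItem, get_memory

cfg = Config()
cfg.memory_backend = "json_file"  # the new default backend

memory = get_memory(cfg)          # returns a JSONFileMemory instance
memory.add(MemoryItem.from_text_file("The API key lives in .env", "notes.txt"))

best = memory.get("Where is the API key stored?")
if best:
    print(best.memory_item.summary, best.score)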

View File

@@ -0,0 +1,223 @@
from __future__ import annotations
import dataclasses
import json
from typing import Literal
import numpy as np
from autogpt.config import Config
from autogpt.llm import Message
from autogpt.llm.utils import count_string_tokens
from autogpt.logs import logger
from autogpt.processing.text import chunk_content, split_text, summarize_text
from .utils import Embedding, get_embedding
MemoryDocType = Literal["webpage", "text_file", "code_file", "agent_history"]
@dataclasses.dataclass
class MemoryItem:
"""Memory object containing raw content as well as embeddings"""
raw_content: str
summary: str
chunks: list[str]
chunk_summaries: list[str]
e_summary: Embedding
e_chunks: list[Embedding]
metadata: dict
def relevance_for(self, query: str, e_query: Embedding | None = None):
return MemoryItemRelevance.of(self, query, e_query)
@staticmethod
def from_text(
text: str,
source_type: MemoryDocType,
metadata: dict = {},
how_to_summarize: str | None = None,
question_for_summary: str | None = None,
):
cfg = Config()
logger.debug(f"Memorizing text:\n{'-'*32}\n{text}\n{'-'*32}\n")
chunks = [
chunk
for chunk, _ in (
split_text(text, cfg.embedding_model)
if source_type != "code_file"
else chunk_content(text, cfg.embedding_model)
)
]
logger.debug("Chunks: " + str(chunks))
chunk_summaries = [
summary
for summary, _ in [
summarize_text(
text_chunk,
instruction=how_to_summarize,
question=question_for_summary,
)
for text_chunk in chunks
]
]
logger.debug("Chunk summaries: " + str(chunk_summaries))
e_chunks = get_embedding(chunks)
summary = (
chunk_summaries[0]
if len(chunks) == 1
else summarize_text(
"\n\n".join(chunk_summaries),
instruction=how_to_summarize,
question=question_for_summary,
)[0]
)
logger.debug("Total summary: " + summary)
# TODO: investigate search performance of weighted average vs summary
# e_average = np.average(e_chunks, axis=0, weights=[len(c) for c in chunks])
e_summary = get_embedding(summary)
metadata["source_type"] = source_type
return MemoryItem(
text,
summary,
chunks,
chunk_summaries,
e_summary,
e_chunks,
metadata=metadata,
)
@staticmethod
def from_text_file(content: str, path: str):
return MemoryItem.from_text(content, "text_file", {"location": path})
@staticmethod
def from_code_file(content: str, path: str):
# TODO: implement tailored code memories
return MemoryItem.from_text(content, "code_file", {"location": path})
@staticmethod
def from_ai_action(ai_message: Message, result_message: Message):
# The result_message contains either user feedback
# or the result of the command specified in ai_message
if ai_message["role"] != "assistant":
raise ValueError(f"Invalid role on 'ai_message': {ai_message['role']}")
result = (
result_message["content"]
if result_message["content"].startswith("Command")
else "None"
)
user_input = (
result_message["content"]
if result_message["content"].startswith("Human feedback")
else "None"
)
memory_content = (
f"Assistant Reply: {ai_message['content']}"
"\n\n"
f"Result: {result}"
"\n\n"
f"Human Feedback: {user_input}"
)
return MemoryItem.from_text(
text=memory_content,
source_type="agent_history",
how_to_summarize="if possible, also make clear the link between the command in the assistant's response and the command result. Do not mention the human feedback if there is none",
)
@staticmethod
def from_webpage(content: str, url: str, question: str | None = None):
return MemoryItem.from_text(
text=content,
source_type="webpage",
metadata={"location": url},
question_for_summary=question,
)
def dump(self) -> str:
token_length = count_string_tokens(self.raw_content, Config().embedding_model)
return f"""
=============== MemoryItem ===============
Length: {token_length} tokens in {len(self.e_chunks)} chunks
Metadata: {json.dumps(self.metadata, indent=2)}
---------------- SUMMARY -----------------
{self.summary}
------------------ RAW -------------------
{self.raw_content}
==========================================
"""
@dataclasses.dataclass
class MemoryItemRelevance:
"""
Class that encapsulates memory relevance search functionality and data.
Instances contain a MemoryItem and its relevance scores for a given query.
"""
memory_item: MemoryItem
for_query: str
summary_relevance_score: float
chunk_relevance_scores: list[float]
@staticmethod
def of(
memory_item: MemoryItem, for_query: str, e_query: Embedding | None = None
) -> MemoryItemRelevance:
e_query = e_query or get_embedding(for_query)
_, srs, crs = MemoryItemRelevance.calculate_scores(memory_item, e_query)
return MemoryItemRelevance(
for_query=for_query,
memory_item=memory_item,
summary_relevance_score=srs,
chunk_relevance_scores=crs,
)
@staticmethod
def calculate_scores(
memory: MemoryItem, compare_to: Embedding
) -> tuple[float, float, list[float]]:
"""
Calculates similarity between given embedding and all embeddings of the memory
Returns:
float: the aggregate (max) relevance score of the memory
float: the relevance score of the memory summary
list: the relevance scores of the memory chunks
"""
summary_relevance_score = np.dot(memory.e_summary, compare_to)
chunk_relevance_scores = np.dot(memory.e_chunks, compare_to)
logger.debug(f"Relevance of summary: {summary_relevance_score}")
logger.debug(f"Relevance of chunks: {chunk_relevance_scores}")
relevance_scores = [summary_relevance_score, *chunk_relevance_scores]
logger.debug(f"Relevance scores: {relevance_scores}")
return max(relevance_scores), summary_relevance_score, chunk_relevance_scores
@property
def score(self) -> float:
"""The aggregate relevance score of the memory item for the given query"""
return max([self.summary_relevance_score, *self.chunk_relevance_scores])
@property
def most_relevant_chunk(self) -> tuple[str, float]:
"""The most relevant chunk of the memory item + its score for the given query"""
i_relmax = np.argmax(self.chunk_relevance_scores)
return self.memory_item.chunks[i_relmax], self.chunk_relevance_scores[i_relmax]
def __str__(self):
return (
f"{self.memory_item.summary} ({self.summary_relevance_score}) "
f"{self.chunk_relevance_scores}"
)
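Both the summary embedding and each chunk embedding are compared to the query with a plain dot product (OpenAI's ada embeddings are approximately unit-length, so this is cosine similarity), and the memory's aggregate score is the maximum over all of them. A minimal numpy sketch of the same arithmetic, using made-up 3-dimensional vectors in place of real embeddings:

import numpy as np

# Hypothetical, already-normalized stand-ins for e_query, e_summary and e_chunks.
e_query = np.array([0.6, 0.8, 0.0])
e_summary = np.array([0.6, 0.8, 0.0])
e_chunks = np.array([[0.0, 1.0, 0.0], [0.8, 0.6, 0.0]])

summary_relevance_score = np.dot(e_summary, e_query)  # 1.0
chunk_relevance_scores = np.dot(e_chunks, e_query)    # array([0.8 , 0.96])

# Mirrors MemoryItemRelevance.score: the max over the summary and chunk scores.
print(max([summary_relevance_score, *chunk_relevance_scores]))  # 1.0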

View File

@@ -0,0 +1,7 @@
from .json_file import JSONFileMemory
from .no_memory import NoMemory
__all__ = [
"JSONFileMemory",
"NoMemory",
]

View File

@@ -0,0 +1,74 @@
import abc
import functools
from typing import MutableSet, Sequence
import numpy as np
from autogpt.config.config import Config
from autogpt.logs import logger
from autogpt.singleton import AbstractSingleton
from .. import MemoryItem, MemoryItemRelevance
from ..utils import Embedding, get_embedding
class VectorMemoryProvider(MutableSet[MemoryItem], AbstractSingleton):
@abc.abstractmethod
def __init__(self, config: Config):
pass
def get(self, query: str) -> MemoryItemRelevance | None:
"""
Gets the data from the memory that is most relevant to the given query.
Args:
            query: The query to compare the stored memories against.
Returns: The most relevant Memory
"""
result = self.get_relevant(query, 1)
return result[0] if result else None
def get_relevant(self, query: str, k: int) -> Sequence[MemoryItemRelevance]:
"""
Returns the top-k most relevant memories for the given query
Args:
query: the query to compare stored memories to
k: the number of relevant memories to fetch
Returns:
list[MemoryItemRelevance] containing the top [k] relevant memories
"""
if len(self) < 1:
return []
logger.debug(
f"Searching for {k} relevant memories for query '{query}'; "
f"{len(self)} memories in index"
)
relevances = self.score_memories_for_relevance(query)
logger.debug(f"Memory relevance scores: {[str(r) for r in relevances]}")
# take last k items and reverse
top_k_indices = np.argsort([r.score for r in relevances])[-k:][::-1]
return [relevances[i] for i in top_k_indices]
def score_memories_for_relevance(
self, for_query: str
) -> Sequence[MemoryItemRelevance]:
"""
Returns MemoryItemRelevance for every memory in the index.
Implementations may override this function for performance purposes.
"""
e_query: Embedding = get_embedding(for_query)
return [m.relevance_for(for_query, e_query) for m in self]
def get_stats(self) -> tuple[int, int]:
"""
Returns:
tuple (n_memories: int, n_chunks: int): the stats of the memory index
"""
return len(self), functools.reduce(lambda t, m: t + len(m.e_chunks), self, 0)
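The top-k selection in get_relevant is a plain argsort over the per-memory scores, taking the last k indices and reversing them. A small sketch with made-up scores shows the order it produces:

import numpy as np

scores = [0.12, 0.87, 0.45, 0.91, 0.30]  # hypothetical relevance scores for five memories
k = 2

top_k_indices = np.argsort(scores)[-k:][::-1]
print(top_k_indices)  # [3 1] -> the memories scoring 0.91 and 0.87, best first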

View File

@@ -0,0 +1,68 @@
from __future__ import annotations
from pathlib import Path
from typing import Iterator
import orjson
from autogpt.config import Config
from autogpt.logs import logger
from ..memory_item import MemoryItem
from .base import VectorMemoryProvider
class JSONFileMemory(VectorMemoryProvider):
"""Memory backend that stores memories in a JSON file"""
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS
file_path: Path
memories: list[MemoryItem]
def __init__(self, cfg: Config) -> None:
"""Initialize a class instance
Args:
cfg: Config object
Returns:
None
"""
workspace_path = Path(cfg.workspace_path)
self.file_path = workspace_path / f"{cfg.memory_index}.json"
self.file_path.touch()
logger.debug(f"Initialized {__name__} with index path {self.file_path}")
self.memories = []
self.save_index()
def __iter__(self) -> Iterator[MemoryItem]:
return iter(self.memories)
def __contains__(self, x: MemoryItem) -> bool:
return x in self.memories
def __len__(self) -> int:
return len(self.memories)
def add(self, item: MemoryItem):
self.memories.append(item)
self.save_index()
return len(self.memories)
def discard(self, item: MemoryItem):
try:
self.remove(item)
except:
pass
def clear(self):
"""Clears the data in memory."""
self.memories.clear()
self.save_index()
def save_index(self):
logger.debug(f"Saving memory index to file {self.file_path}")
with self.file_path.open("wb") as f:
return f.write(orjson.dumps(self.memories, option=self.SAVE_OPTIONS))
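save_index() relies on orjson's numpy and dataclass serialization options to write the whole memories list in one call. A rough standalone sketch of that serialization, using a made-up stand-in dataclass and file name rather than the real MemoryItem:

import dataclasses
import orjson

@dataclasses.dataclass
class FakeMemoryItem:  # illustrative stand-in, not the real MemoryItem
    raw_content: str
    e_summary: list[float]

memories = [FakeMemoryItem("hello world", [0.1, 0.2])]
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS

# Same write pattern as JSONFileMemory.save_index(), but to an arbitrary local file.
with open("auto-gpt-memory.json", "wb") as f:
    f.write(orjson.dumps(memories, option=SAVE_OPTIONS))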

View File

@@ -0,0 +1,36 @@
"""A class that does not store any data. This is the default memory provider."""
from __future__ import annotations
from typing import Iterator, Optional
from autogpt.config.config import Config
from .. import MemoryItem
from .base import VectorMemoryProvider
class NoMemory(VectorMemoryProvider):
"""
A class that does not store any data. This is the default memory provider.
"""
def __init__(self, config: Optional[Config] = None):
pass
def __iter__(self) -> Iterator[MemoryItem]:
return iter([])
def __contains__(self, x: MemoryItem) -> bool:
return False
def __len__(self) -> int:
return 0
def add(self, item: MemoryItem):
pass
def discard(self, item: MemoryItem):
pass
def clear(self):
pass

View File

@@ -0,0 +1,71 @@
from typing import Any, overload
import numpy as np
import numpy.typing as npt
import openai
from autogpt.config import Config
from autogpt.llm.utils import metered, retry_openai_api
from autogpt.logs import logger
Embedding = list[np.float32] | np.ndarray[Any, np.dtype[np.float32]]
"""Embedding vector"""
TText = list[int]
"""Token array representing text"""
@overload
def get_embedding(input: str | TText) -> Embedding:
...
@overload
def get_embedding(input: list[str] | list[TText]) -> list[Embedding]:
...
@metered
@retry_openai_api()
def get_embedding(
input: str | TText | list[str] | list[TText],
) -> Embedding | list[Embedding]:
"""Get an embedding from the ada model.
Args:
input: Input text to get embeddings for, encoded as a string or array of tokens.
Multiple inputs may be given as a list of strings or token arrays.
Returns:
List[float]: The embedding.
"""
cfg = Config()
multiple = isinstance(input, list) and all(not isinstance(i, int) for i in input)
if isinstance(input, str):
input = input.replace("\n", " ")
elif multiple and isinstance(input[0], str):
input = [text.replace("\n", " ") for text in input]
model = cfg.embedding_model
if cfg.use_azure:
kwargs = {"engine": cfg.get_azure_deployment_id_for_model(model)}
else:
kwargs = {"model": model}
logger.debug(
f"Getting embedding{f's for {len(input)} inputs' if multiple else ''}"
f" with model '{model}'"
+ (f" via Azure deployment '{kwargs['engine']}'" if cfg.use_azure else "")
)
embeddings = openai.Embedding.create(
input=input,
api_key=cfg.openai_api_key,
**kwargs,
).data
if not multiple:
return embeddings[0]["embedding"]
embeddings = sorted(embeddings, key=lambda x: x["index"])
return [d["embedding"] for d in embeddings]
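Whether the call is treated as a single input or a batch hinges on one predicate. A standalone sketch of that check (is_batch is a made-up name used only for illustration):

def is_batch(input) -> bool:
    # A list counts as a batch unless it is a single token array (a list of ints).
    return isinstance(input, list) and all(not isinstance(i, int) for i in input)

print(is_batch("hello"))             # False: one string
print(is_batch([15339, 1917]))       # False: one token array
print(is_batch(["hello", "world"]))  # True: a batch of strings
print(is_batch([[15339], [1917]]))   # True: a batch of token arrays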

View File

@@ -1,127 +0,0 @@
import weaviate
from weaviate import Client
from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5
from autogpt.llm import get_ada_embedding
from autogpt.logs import logger
from autogpt.memory.base import MemoryProviderSingleton
def default_schema(weaviate_index):
return {
"class": weaviate_index,
"properties": [
{
"name": "raw_text",
"dataType": ["text"],
"description": "original text for the embedding",
}
],
}
class WeaviateMemory(MemoryProviderSingleton):
def __init__(self, cfg):
auth_credentials = self._build_auth_credentials(cfg)
url = f"{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}"
if cfg.use_weaviate_embedded:
self.client = Client(
embedded_options=EmbeddedOptions(
hostname=cfg.weaviate_host,
port=int(cfg.weaviate_port),
persistence_data_path=cfg.weaviate_embedded_path,
)
)
logger.info(
f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
)
else:
self.client = Client(url, auth_client_secret=auth_credentials)
self.index = WeaviateMemory.format_classname(cfg.memory_index)
self._create_schema()
@staticmethod
def format_classname(index):
# weaviate uses capitalised index names
# The python client uses the following code to format
# index names before the corresponding class is created
index = index.replace("-", "_")
if len(index) == 1:
return index.capitalize()
return index[0].capitalize() + index[1:]
def _create_schema(self):
schema = default_schema(self.index)
if not self.client.schema.contains(schema):
self.client.schema.create_class(schema)
def _build_auth_credentials(self, cfg):
if cfg.weaviate_username and cfg.weaviate_password:
return weaviate.AuthClientPassword(
cfg.weaviate_username, cfg.weaviate_password
)
if cfg.weaviate_api_key:
return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
else:
return None
def add(self, data):
vector = get_ada_embedding(data)
doc_uuid = generate_uuid5(data, self.index)
data_object = {"raw_text": data}
with self.client.batch as batch:
batch.add_data_object(
uuid=doc_uuid,
data_object=data_object,
class_name=self.index,
vector=vector,
)
return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"
def get(self, data):
return self.get_relevant(data, 1)
def clear(self):
self.client.schema.delete_all()
# weaviate does not yet have a neat way to just remove the items in an index
# without removing the entire schema, therefore we need to re-create it
# after a call to delete_all
self._create_schema()
return "Obliterated"
def get_relevant(self, data, num_relevant=5):
query_embedding = get_ada_embedding(data)
try:
results = (
self.client.query.get(self.index, ["raw_text"])
.with_near_vector({"vector": query_embedding, "certainty": 0.7})
.with_limit(num_relevant)
.do()
)
if len(results["data"]["Get"][self.index]) > 0:
return [
str(item["raw_text"]) for item in results["data"]["Get"][self.index]
]
else:
return []
except Exception as err:
logger.warn(f"Unexpected error {err=}, {type(err)=}")
return []
def get_stats(self):
result = self.client.query.aggregate(self.index).with_meta_count().do()
class_data = result["data"]["Aggregate"][self.index]
return class_data[0]["meta"] if class_data else {}

View File

@@ -1,33 +0,0 @@
from autogpt.json_utils.utilities import (
LLM_DEFAULT_RESPONSE_FORMAT,
is_string_valid_json,
)
from autogpt.logs import logger
def format_memory(assistant_reply, next_message_content):
# the next_message_content is a variable to stores either the user_input or the command following the assistant_reply
result = (
"None" if next_message_content.startswith("Command") else next_message_content
)
user_input = (
"None"
if next_message_content.startswith("Human feedback")
else next_message_content
)
return f"Assistant Reply: {assistant_reply}\nResult: {result}\nHuman Feedback:{user_input}"
def save_memory_trimmed_from_context_window(
full_message_history, next_message_to_add_index, permanent_memory
):
while next_message_to_add_index >= 0:
message_content = full_message_history[next_message_to_add_index]["content"]
if is_string_valid_json(message_content, LLM_DEFAULT_RESPONSE_FORMAT):
next_message = full_message_history[next_message_to_add_index + 1]
memory_to_add = format_memory(message_content, next_message["content"])
logger.debug(f"Storing the following memory: {memory_to_add}")
permanent_memory.add(memory_to_add)
next_message_to_add_index -= 1

View File

@@ -1,143 +0,0 @@
import copy
import json
from typing import Dict, List, Tuple
from autogpt.agent import Agent
from autogpt.config import Config
from autogpt.llm.llm_utils import create_chat_completion
from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
from autogpt.logs import logger
cfg = Config()
def get_newly_trimmed_messages(
full_message_history: List[Dict[str, str]],
current_context: List[Dict[str, str]],
last_memory_index: int,
) -> Tuple[List[Dict[str, str]], int]:
"""
This function returns a list of dictionaries contained in full_message_history
with an index higher than prev_index that are absent from current_context.
Args:
full_message_history (list): A list of dictionaries representing the full message history.
current_context (list): A list of dictionaries representing the current context.
last_memory_index (int): An integer representing the previous index.
Returns:
list: A list of dictionaries that are in full_message_history with an index higher than last_memory_index and absent from current_context.
int: The new index value for use in the next loop.
"""
# Select messages in full_message_history with an index higher than last_memory_index
new_messages = [
msg for i, msg in enumerate(full_message_history) if i > last_memory_index
]
# Remove messages that are already present in current_context
new_messages_not_in_context = [
msg for msg in new_messages if msg not in current_context
]
# Find the index of the last message processed
new_index = last_memory_index
if new_messages_not_in_context:
last_message = new_messages_not_in_context[-1]
new_index = full_message_history.index(last_message)
return new_messages_not_in_context, new_index
def update_running_summary(
agent: Agent, current_memory: str, new_events: List[Dict[str, str]]
) -> str:
"""
This function takes a list of dictionaries representing new events and combines them with the current summary,
focusing on key and potentially important information to remember. The updated summary is returned in a message
formatted in the 1st person past tense.
Args:
new_events (List[Dict]): A list of dictionaries containing the latest events to be added to the summary.
Returns:
str: A message containing the updated summary of actions, formatted in the 1st person past tense.
Example:
new_events = [{"event": "entered the kitchen."}, {"event": "found a scrawled note with the number 7"}]
update_running_summary(new_events)
# Returns: "This reminds you of these events from your past: \nI entered the kitchen and found a scrawled note saying 7."
"""
# Create a copy of the new_events list to prevent modifying the original list
new_events = copy.deepcopy(new_events)
# Replace "assistant" with "you". This produces much better first person past tense results.
for event in new_events:
if event["role"].lower() == "assistant":
event["role"] = "you"
# Remove "thoughts" dictionary from "content"
try:
content_dict = json.loads(event["content"])
if "thoughts" in content_dict:
del content_dict["thoughts"]
event["content"] = json.dumps(content_dict)
except json.decoder.JSONDecodeError:
if cfg.debug_mode:
logger.error(f"Error: Invalid JSON: {event['content']}\n")
elif event["role"].lower() == "system":
event["role"] = "your computer"
# Delete all user messages
elif event["role"] == "user":
new_events.remove(event)
# This can happen at any point during execution, not just the beginning
if len(new_events) == 0:
new_events = "Nothing new happened."
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
You will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
Summary So Far:
"""
{current_memory}
"""
Latest Development:
"""
{new_events}
"""
'''
messages = [
{
"role": "user",
"content": prompt,
}
]
agent.log_cycle_handler.log_cycle(
agent.config.ai_name,
agent.created_at,
agent.cycle_count,
messages,
PROMPT_SUMMARY_FILE_NAME,
)
current_memory = create_chat_completion(messages, cfg.fast_llm_model)
agent.log_cycle_handler.log_cycle(
agent.config.ai_name,
agent.created_at,
agent.cycle_count,
current_memory,
SUMMARY_FILE_NAME,
)
message_to_return = {
"role": "system",
"content": f"This reminds you of these events from your past: \n{current_memory}",
}
return message_to_return

View File

@@ -1,6 +1,6 @@
"""Handles loading of plugins.""" """Handles loading of plugins."""
import importlib import importlib.util
import json import json
import os import os
import zipfile import zipfile
@@ -12,7 +12,7 @@ from zipimport import zipimporter
import openapi_python_client import openapi_python_client
import requests import requests
from auto_gpt_plugin_template import AutoGPTPluginTemplate from auto_gpt_plugin_template import AutoGPTPluginTemplate
from openapi_python_client.cli import Config as OpenAPIConfig from openapi_python_client.config import Config as OpenAPIConfig
from autogpt.config import Config from autogpt.config import Config
from autogpt.logs import logger from autogpt.logs import logger
@@ -152,7 +152,7 @@ def initialize_openai_plugins(
) )
prev_cwd = Path.cwd() prev_cwd = Path.cwd()
os.chdir(openai_plugin_client_dir) os.chdir(openai_plugin_client_dir)
Path("ai-plugin.json")
if not os.path.exists("client"): if not os.path.exists("client"):
client_results = openapi_python_client.create_new_client( client_results = openapi_python_client.create_new_client(
url=manifest_spec["manifest"]["api"]["url"], url=manifest_spec["manifest"]["api"]["url"],
@@ -170,9 +170,13 @@ def initialize_openai_plugins(
"client", "client/client/client.py" "client", "client/client/client.py"
) )
module = importlib.util.module_from_spec(spec) module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
try:
spec.loader.exec_module(module)
finally:
os.chdir(prev_cwd)
client = module.Client(base_url=url) client = module.Client(base_url=url)
os.chdir(prev_cwd)
manifest_spec["client"] = client manifest_spec["client"] = client
return manifests_specs return manifests_specs

View File

@@ -1,170 +1,234 @@
"""Text processing functions""" """Text processing functions"""
from typing import Dict, Generator, Optional from math import ceil
from typing import Optional
import spacy import spacy
from selenium.webdriver.remote.webdriver import WebDriver import tiktoken
from autogpt.config import Config from autogpt.config import Config
from autogpt.llm import count_message_tokens, create_chat_completion from autogpt.llm.base import ChatSequence
from autogpt.llm.providers.openai import OPEN_AI_MODELS
from autogpt.llm.utils import count_string_tokens, create_chat_completion
from autogpt.logs import logger from autogpt.logs import logger
from autogpt.memory import get_memory from autogpt.utils import batch
CFG = Config() CFG = Config()
def _max_chunk_length(model: str, max: Optional[int] = None) -> int:
model_max_input_tokens = OPEN_AI_MODELS[model].max_tokens - 1
if max is not None and max > 0:
return min(max, model_max_input_tokens)
return model_max_input_tokens
def must_chunk_content(
text: str, for_model: str, max_chunk_length: Optional[int] = None
) -> bool:
return count_string_tokens(text, for_model) > _max_chunk_length(
for_model, max_chunk_length
)
def chunk_content(
content: str,
for_model: str,
max_chunk_length: Optional[int] = None,
with_overlap=True,
):
"""Split content into chunks of approximately equal token length."""
MAX_OVERLAP = 200 # limit overlap to save tokens
if not must_chunk_content(content, for_model, max_chunk_length):
yield content, count_string_tokens(content, for_model)
return
max_chunk_length = max_chunk_length or _max_chunk_length(for_model)
tokenizer = tiktoken.encoding_for_model(for_model)
tokenized_text = tokenizer.encode(content)
total_length = len(tokenized_text)
n_chunks = ceil(total_length / max_chunk_length)
chunk_length = ceil(total_length / n_chunks)
overlap = min(max_chunk_length - chunk_length, MAX_OVERLAP) if with_overlap else 0
for token_batch in batch(tokenized_text, chunk_length + overlap, overlap):
yield tokenizer.decode(token_batch), len(token_batch)
def summarize_text(
text: str, instruction: Optional[str] = None, question: Optional[str] = None
) -> tuple[str, None | list[tuple[str, str]]]:
"""Summarize text using the OpenAI API
Args:
text (str): The text to summarize
instruction (str): Additional instruction for summarization, e.g. "focus on information related to polar bears", "omit personal information contained in the text"
Returns:
str: The summary of the text
list[(summary, chunk)]: Text chunks and their summary, if the text was chunked.
None otherwise.
"""
if not text:
raise ValueError("No text to summarize")
if instruction and question:
raise ValueError("Parameters 'question' and 'instructions' cannot both be set")
model = CFG.fast_llm_model
if question:
instruction = (
f'include any information that can be used to answer the question "{question}". '
"Do not directly answer the question itself"
)
summarization_prompt = ChatSequence.for_model(model)
token_length = count_string_tokens(text, model)
logger.info(f"Text length: {token_length} tokens")
# reserve 50 tokens for summary prompt, 500 for the response
max_chunk_length = _max_chunk_length(model) - 550
logger.info(f"Max chunk length: {max_chunk_length} tokens")
if not must_chunk_content(text, model, max_chunk_length):
# summarization_prompt.add("user", text)
summarization_prompt.add(
"user",
"Write a concise summary of the following text"
f"{f'; {instruction}' if instruction is not None else ''}:"
"\n\n\n"
f'LITERAL TEXT: """{text}"""'
"\n\n\n"
"CONCISE SUMMARY: The text is best summarized as"
# "Only respond with a concise summary or description of the user message."
)
logger.debug(f"Summarizing with {model}:\n{summarization_prompt.dump()}\n")
summary = create_chat_completion(
summarization_prompt, temperature=0, max_tokens=500
)
logger.debug(f"\n{'-'*16} SUMMARY {'-'*17}\n{summary}\n{'-'*42}\n")
return summary.strip(), None
summaries: list[str] = []
chunks = list(split_text(text, for_model=model, max_chunk_length=max_chunk_length))
for i, (chunk, chunk_length) in enumerate(chunks):
logger.info(
f"Summarizing chunk {i + 1} / {len(chunks)} of length {chunk_length} tokens"
)
summary, _ = summarize_text(chunk, instruction)
summaries.append(summary)
logger.info(f"Summarized {len(chunks)} chunks")
summary, _ = summarize_text("\n\n".join(summaries))
return summary.strip(), [
(summaries[i], chunks[i][0]) for i in range(0, len(chunks))
]
-def split_text(
-    text: str,
-    max_length: int = CFG.browse_chunk_max_length,
-    model: str = CFG.fast_llm_model,
-    question: str = "",
-) -> Generator[str, None, None]:
-    """Split text into chunks of a maximum length
-    Args:
-        text (str): The text to split
-        max_length (int, optional): The maximum length of each chunk. Defaults to 8192.
-    Yields:
-        str: The next chunk of text
-    Raises:
-        ValueError: If the text is longer than the maximum length
-    """
-    flattened_paragraphs = " ".join(text.split("\n"))
-    nlp = spacy.load(CFG.browse_spacy_language_model)
-    nlp.add_pipe("sentencizer")
-    doc = nlp(flattened_paragraphs)
-    sentences = [sent.text.strip() for sent in doc.sents]
-    current_chunk = []
-    for sentence in sentences:
-        message_with_additional_sentence = [
-            create_message(" ".join(current_chunk) + " " + sentence, question)
-        ]
-        expected_token_usage = (
-            count_message_tokens(messages=message_with_additional_sentence, model=model)
-            + 1
-        )
-        if expected_token_usage <= max_length:
-            current_chunk.append(sentence)
-        else:
-            yield " ".join(current_chunk)
-            current_chunk = [sentence]
-            message_this_sentence_only = [
-                create_message(" ".join(current_chunk), question)
-            ]
-            expected_token_usage = (
-                count_message_tokens(messages=message_this_sentence_only, model=model)
-                + 1
-            )
-            if expected_token_usage > max_length:
-                raise ValueError(
-                    f"Sentence is too long in webpage: {expected_token_usage} tokens."
-                )
-    if current_chunk:
-        yield " ".join(current_chunk)
+def split_text(
+    text: str,
+    for_model: str = CFG.fast_llm_model,
+    with_overlap=True,
+    max_chunk_length: Optional[int] = None,
+):
+    """Split text into chunks of sentences, with each chunk not exceeding the maximum length
+    Args:
+        text (str): The text to split
+        for_model (str): The model to chunk for; determines tokenizer and constraints
+        max_length (int, optional): The maximum length of each chunk
+    Yields:
+        str: The next chunk of text
+    Raises:
+        ValueError: when a sentence is longer than the maximum length
+    """
+    max_length = _max_chunk_length(for_model, max_chunk_length)
+    # flatten paragraphs to improve performance
+    text = text.replace("\n", " ")
+    text_length = count_string_tokens(text, for_model)
+    if text_length < max_length:
+        yield text, text_length
+        return
+    n_chunks = ceil(text_length / max_length)
+    target_chunk_length = ceil(text_length / n_chunks)
+    nlp: spacy.language.Language = spacy.load(CFG.browse_spacy_language_model)
+    nlp.add_pipe("sentencizer")
+    doc = nlp(text)
+    sentences = [sentence.text.strip() for sentence in doc.sents]
+    current_chunk: list[str] = []
+    current_chunk_length = 0
+    last_sentence = None
+    last_sentence_length = 0
+    i = 0
+    while i < len(sentences):
+        sentence = sentences[i]
+        sentence_length = count_string_tokens(sentence, for_model)
+        expected_chunk_length = current_chunk_length + 1 + sentence_length
+        if (
+            expected_chunk_length < max_length
+            # try to create chunks of approximately equal size
+            and expected_chunk_length - (sentence_length / 2) < target_chunk_length
+        ):
+            current_chunk.append(sentence)
+            current_chunk_length = expected_chunk_length
+        elif sentence_length < max_length:
+            if last_sentence:
+                yield " ".join(current_chunk), current_chunk_length
+                current_chunk = []
+                current_chunk_length = 0
+                if with_overlap:
+                    overlap_max_length = max_length - sentence_length - 1
+                    if last_sentence_length < overlap_max_length:
+                        current_chunk += [last_sentence]
+                        current_chunk_length += last_sentence_length + 1
+                    elif overlap_max_length > 5:
+                        # add as much from the end of the last sentence as fits
+                        current_chunk += [
+                            list(
+                                chunk_content(
+                                    last_sentence,
+                                    for_model,
+                                    overlap_max_length,
+                                )
+                            ).pop()[0],
+                        ]
+                        current_chunk_length += overlap_max_length + 1
+            current_chunk += [sentence]
+            current_chunk_length += sentence_length
+        else:  # sentence longer than maximum length -> chop up and try again
+            sentences[i : i + 1] = [
+                chunk
+                for chunk, _ in chunk_content(sentence, for_model, target_chunk_length)
+            ]
+            continue
+        i += 1
+        last_sentence = sentence
+        last_sentence_length = sentence_length
+    if current_chunk:
+        yield " ".join(current_chunk), current_chunk_length
def summarize_text(
url: str, text: str, question: str, driver: Optional[WebDriver] = None
) -> str:
"""Summarize text using the OpenAI API
Args:
url (str): The url of the text
text (str): The text to summarize
question (str): The question to ask the model
driver (WebDriver): The webdriver to use to scroll the page
Returns:
str: The summary of the text
"""
if not text:
return "Error: No text to summarize"
model = CFG.fast_llm_model
text_length = len(text)
logger.info(f"Text length: {text_length} characters")
summaries = []
chunks = list(
split_text(
text, max_length=CFG.browse_chunk_max_length, model=model, question=question
),
)
scroll_ratio = 1 / len(chunks)
for i, chunk in enumerate(chunks):
if driver:
scroll_to_percentage(driver, scroll_ratio * i)
logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory")
memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
memory = get_memory(CFG)
memory.add(memory_to_add)
messages = [create_message(chunk, question)]
tokens_for_chunk = count_message_tokens(messages, model)
logger.info(
f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
)
summary = create_chat_completion(
model=model,
messages=messages,
)
summaries.append(summary)
logger.info(
f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters"
)
memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"
memory.add(memory_to_add)
logger.info(f"Summarized {len(chunks)} chunks.")
combined_summary = "\n".join(summaries)
messages = [create_message(combined_summary, question)]
return create_chat_completion(
model=model,
messages=messages,
)
def scroll_to_percentage(driver: WebDriver, ratio: float) -> None:
"""Scroll to a percentage of the page
Args:
driver (WebDriver): The webdriver to use
ratio (float): The percentage to scroll to
Raises:
ValueError: If the ratio is not between 0 and 1
"""
if ratio < 0 or ratio > 1:
raise ValueError("Percentage should be between 0 and 1")
driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
def create_message(chunk: str, question: str) -> Dict[str, str]:
"""Create a message for the chat completion
Args:
chunk (str): The chunk of text to summarize
question (str): The question to answer
Returns:
Dict[str, str]: The message to send to the chat completion
"""
return {
"role": "user",
"content": f'"""{chunk}""" Using the above text, answer the following'
f' question: "{question}" -- if the question cannot be answered using the text,'
" summarize the text.",
}
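To make the sizing arithmetic in chunk_content concrete, here is the same calculation with made-up numbers (a 10,000-token input against an 8,191-token model limit); the real values depend on the model and on count_string_tokens:

from math import ceil

total_length = 10_000      # hypothetical token count of the input
max_chunk_length = 8_191   # hypothetical model input limit
MAX_OVERLAP = 200

n_chunks = ceil(total_length / max_chunk_length)             # 2
chunk_length = ceil(total_length / n_chunks)                 # 5000
overlap = min(max_chunk_length - chunk_length, MAX_OVERLAP)  # 200

# Each emitted chunk is chunk_length + overlap tokens long, and consecutive
# chunks start chunk_length tokens apart, so neighbours share `overlap` tokens.
print(n_chunks, chunk_length, overlap)  # 2 5000 200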

View File

@@ -1,6 +1,9 @@
""" A module for generating custom prompt strings.""" """ A module for generating custom prompt strings."""
import json import json
from typing import Any, Callable, Dict, List, Optional from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
if TYPE_CHECKING:
from autogpt.commands.command import CommandRegistry
class PromptGenerator: class PromptGenerator:
@@ -19,7 +22,7 @@ class PromptGenerator:
self.resources = [] self.resources = []
self.performance_evaluation = [] self.performance_evaluation = []
self.goals = [] self.goals = []
self.command_registry = None self.command_registry: CommandRegistry | None = None
self.name = "Bob" self.name = "Bob"
self.role = "AI" self.role = "AI"
self.response_format = { self.response_format = {

View File

@@ -3,7 +3,7 @@ from colorama import Fore
 from autogpt.config.ai_config import AIConfig
 from autogpt.config.config import Config
 from autogpt.config.prompt_config import PromptConfig
-from autogpt.llm import ApiManager
+from autogpt.llm.api_manager import ApiManager
 from autogpt.logs import logger
 from autogpt.prompts.generator import PromptGenerator
 from autogpt.setup import prompt_user

View File

@@ -7,7 +7,8 @@ from jinja2 import Template
 from autogpt import utils
 from autogpt.config import Config
 from autogpt.config.ai_config import AIConfig
-from autogpt.llm import create_chat_completion
+from autogpt.llm.base import ChatSequence, Message
+from autogpt.llm.chat import create_chat_completion
 from autogpt.logs import logger
 from autogpt.prompts.default_prompts import (
     DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC,
@@ -175,17 +176,15 @@ def generate_aiconfig_automatic(user_prompt) -> AIConfig:
         DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC
     ).render(user_prompt=user_prompt)
     # Call LLM with the string as user input
-    messages = [
-        {
-            "role": "system",
-            "content": system_prompt,
-        },
-        {
-            "role": "user",
-            "content": prompt_ai_config_automatic,
-        },
-    ]
-    output = create_chat_completion(messages, CFG.fast_llm_model)
+    output = create_chat_completion(
+        ChatSequence.for_model(
+            CFG.fast_llm_model,
+            [
+                Message("system", system_prompt),
+                Message("user", prompt_ai_config_automatic),
+            ],
+        )
+    )
     # Debug LLM Output
     logger.debug(f"AI Config Generator Raw Output: {output}")

View File

@@ -4,7 +4,7 @@ import os
 import requests
 from playsound import playsound
-from autogpt.config import Config
+from autogpt.config.config import Config
 from autogpt.speech.base import VoiceBase
 PLACEHOLDERS = {"your-voice-id"}

View File

@@ -2,7 +2,7 @@
 import threading
 from threading import Semaphore
-from autogpt.config import Config
+from autogpt.config.config import Config
 from autogpt.speech.base import VoiceBase
 from autogpt.speech.brian import BrianSpeech
 from autogpt.speech.eleven_labs import ElevenLabsSpeech

View File

@@ -17,6 +17,15 @@ except ImportError:
 from autogpt.config import Config
+def batch(iterable, max_batch_length: int, overlap: int = 0):
+    """Batch data from iterable into slices of length N. The last batch may be shorter."""
+    # batched('ABCDEFG', 3) --> ABC DEF G
+    if max_batch_length < 1:
+        raise ValueError("n must be at least one")
+    for i in range(0, len(iterable), max_batch_length - overlap):
+        yield iterable[i : i + max_batch_length]
 def clean_input(prompt: str = "", talk=False):
     try:
         cfg = Config()
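The overlap parameter makes consecutive batches share their boundary elements, which is what the chunking code builds on. A runnable copy of the helper with a tiny string input shows the effect:

def batch(iterable, max_batch_length: int, overlap: int = 0):
    """Batch data from iterable into slices of length N. The last batch may be shorter."""
    if max_batch_length < 1:
        raise ValueError("n must be at least one")
    for i in range(0, len(iterable), max_batch_length - overlap):
        yield iterable[i : i + max_batch_length]

print(list(batch("ABCDEFG", 3)))     # ['ABC', 'DEF', 'G']
print(list(batch("ABCDEFG", 3, 1)))  # ['ABC', 'CDE', 'EFG', 'G']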

View File

@@ -3,7 +3,7 @@ import logging
 from autogpt.commands.file_operations import ingest_file, list_files
 from autogpt.config import Config
-from autogpt.memory import get_memory
+from autogpt.memory.vector import VectorMemory, get_memory
 cfg = Config()
@@ -21,14 +21,14 @@ def configure_logging():
     return logging.getLogger("AutoGPT-Ingestion")
-def ingest_directory(directory, memory, args):
+def ingest_directory(directory: str, memory: VectorMemory, args):
     """
     Ingest all files in a directory by calling the ingest_file function for each file.
     :param directory: The directory containing the files to ingest
     :param memory: An object with an add() method to store the chunks in memory
     """
-    global logger
+    logger = logging.getLogger("AutoGPT-Ingestion")
     try:
         files = list_files(directory)
         for file in files:

View File

@@ -33,7 +33,7 @@ Create your agent fixture.
 ```python
 def kubernetes_agent(
-    agent_test_config, memory_local_cache, workspace: Workspace
+    agent_test_config, memory_json_file, workspace: Workspace
 ):
     # Please choose the commands your agent will need to beat the challenges, the full list is available in the main.py
     # (we 're working on a better way to design this, for now you have to look at main.py)
@@ -56,7 +56,7 @@ def kubernetes_agent(
     agent = Agent(
         # We also give the AI a name
         ai_name="Kubernetes-Demo",
-        memory=memory_local_cache,
+        memory=memory_json_file,
         full_message_history=[],
         command_registry=command_registry,
         config=ai_config,
@@ -131,5 +131,3 @@ def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> Non
 ```

View File

@@ -1,3 +1,9 @@
+!!! warning
+    The Pinecone, Milvus and Weaviate memory backends were rendered incompatible
+    by work on the memory system, and have been removed in `master`.
+    Whether support will be added back in the future is subject to discussion,
+    feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
 ## Setting Your Cache Type
 By default, Auto-GPT set up with Docker Compose will use Redis as its memory backend.
@@ -6,7 +12,7 @@ Otherwise, the default is LocalCache (which stores memory in a JSON file).
 To switch to a different backend, change the `MEMORY_BACKEND` in `.env`
 to the value that you want:
-* `local` uses a local JSON cache file
+* `json_file` uses a local JSON cache file
 * `pinecone` uses the Pinecone.io account you configured in your ENV settings
 * `redis` will use the redis cache that you configured
 * `milvus` will use the milvus cache that you configured

View File

@@ -4,11 +4,11 @@ from pathlib import Path
 import pytest
 from pytest_mock import MockerFixture
-from autogpt.config import Config
+from autogpt.config.config import Config
-from autogpt.llm import ApiManager
+from autogpt.llm.api_manager import ApiManager
 from autogpt.workspace import Workspace
-pytest_plugins = ["tests.integration.agent_factory"]
+pytest_plugins = ["tests.integration.agent_factory", "tests.integration.memory.utils"]
 PROXY = os.environ.get("PROXY")

View File

@@ -3,7 +3,7 @@ import pytest
 from autogpt.agent import Agent
 from autogpt.commands.command import CommandRegistry
 from autogpt.config import AIConfig, Config
-from autogpt.memory import LocalCache, NoMemory, get_memory
+from autogpt.memory.vector import NoMemory, get_memory
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
 from autogpt.workspace import Workspace
@@ -20,20 +20,10 @@ def agent_test_config(config: Config):
 @pytest.fixture
-def memory_local_cache(agent_test_config: Config):
+def memory_json_file(agent_test_config: Config):
     was_memory_backend = agent_test_config.memory_backend
-    agent_test_config.set_memory_backend("local_cache")
+    agent_test_config.set_memory_backend("json_file")
-    yield get_memory(agent_test_config, init=True)
-    agent_test_config.set_memory_backend(was_memory_backend)
-@pytest.fixture
-def memory_none(agent_test_config: Config):
-    was_memory_backend = agent_test_config.memory_backend
-    agent_test_config.set_memory_backend("no_memory")
     yield get_memory(agent_test_config, init=True)
     agent_test_config.set_memory_backend(was_memory_backend)
@@ -64,7 +54,6 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace
     agent = Agent(
         ai_name="",
         memory=memory_none,
-        full_message_history=[],
         command_registry=command_registry,
         config=ai_config,
         next_action_count=0,
@@ -103,7 +92,6 @@ def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace)
     agent = Agent(
         ai_name="",
         memory=memory_none,
-        full_message_history=[],
         command_registry=command_registry,
         config=ai_config,
         next_action_count=0,
@@ -116,9 +104,7 @@ def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace)
 @pytest.fixture
-def memory_management_agent(
-    agent_test_config, memory_local_cache, workspace: Workspace
-):
+def memory_management_agent(agent_test_config, memory_json_file, workspace: Workspace):
     command_registry = CommandRegistry()
     command_registry.import_commands("autogpt.commands.file_operations")
     command_registry.import_commands("autogpt.app")
@@ -138,8 +124,7 @@ def memory_management_agent(
     agent = Agent(
         ai_name="",
-        memory=memory_local_cache,
+        memory=memory_json_file,
-        full_message_history=[],
         command_registry=command_registry,
         config=ai_config,
         next_action_count=0,
@@ -153,7 +138,7 @@ def memory_management_agent(
 @pytest.fixture
 def get_company_revenue_agent(
-    agent_test_config, memory_local_cache, workspace: Workspace
+    agent_test_config, memory_json_file, workspace: Workspace
 ):
     command_registry = CommandRegistry()
     command_registry.import_commands("autogpt.commands.file_operations")
@@ -172,7 +157,7 @@ def get_company_revenue_agent(
     Config().set_continuous_mode(False)
     agent = Agent(
         ai_name="Get-CompanyRevenue",
-        memory=memory_local_cache,
+        memory=memory_json_file,
         full_message_history=[],
         command_registry=command_registry,
         config=ai_config,
@@ -186,7 +171,7 @@ def get_company_revenue_agent(
 @pytest.fixture
-def kubernetes_agent(memory_local_cache, workspace: Workspace):
+def kubernetes_agent(memory_json_file, workspace: Workspace):
     command_registry = CommandRegistry()
     command_registry.import_commands("autogpt.commands.file_operations")
     command_registry.import_commands("autogpt.app")
@@ -205,7 +190,7 @@ def kubernetes_agent(memory_local_cache, workspace: Workspace):
     Config().set_continuous_mode(False)
     agent = Agent(
         ai_name="Kubernetes-Demo",
-        memory=memory_local_cache,
+        memory=memory_json_file,
         full_message_history=[],
         command_registry=command_registry,
         config=ai_config,

View File

@@ -1,168 +0,0 @@
[Deleted VCR cassette: a recorded POST to https://api.openai.com/v1/embeddings for model "text-embedding-ada-002" (tokenized input [[1985]]), including request headers, the base64/gzip-encoded embedding response body, and OpenAI rate-limit headers. Binary payload omitted.]

View File

@@ -1,997 +0,0 @@
[Deleted VCR cassette: recorded POST requests to https://api.openai.com/v1/embeddings for model "text-embedding-ada-002" (input ["Sample text 1"], among others), including request headers and base64/gzip-encoded embedding response bodies. Binary payloads omitted.]
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 26 Apr 2023 21:55:16 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '311'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 665023d2f6ce38148bbb6db93c66bb82
status:
code: 200
message: OK
- request:
body: '{"input": ["Sample text 2"], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6W9O6vJbn/f4Ub723zi4BkYR9x0nOJCiKOjU1BYgICMghAdLV331K/7t6um+e
Cx6EWmHld1r5j3/89dffXVrl2fT3v/76+12O09//63vtkUzJ3//663//46+//vrrP35//8edeZPm
j0fZFr/bf/8s20e+/P2vv7j/uvL/b/rXX3+PA19Q5yVYKZuW7ix1zX5Dre0xABNeX2e5CviZms81
0sd7il0Y1XxL8bzt9YkEuIBShzOyRHOYzvviHcJ7sOcQ3y4LY7wDOJB7rY9VVxe80ffjEppne8WO
dMf6qAmbEKjHViZygrSUlXZ5hkrRC1Q1PAKIs0ENOOJnhbbCbklZthVjyDYSILtj1nsLFTY3sL0J
B4qj+w5Mwno0ZPNq6jRQYBPR7XlW5Jfmd9jdEDNdYL3fwPQcytRA5MCWtxfYQDIuBtWG5KbPqXAZ
oXZmL1S/RSdlt4tygm53wNSQhjKaHq3fQLFe3vhg3a/6sr5LDv7eb0GlTKduVVYJ3bwzql1cgrVQ
+wSeQs2mRhJZw+dsKTc44scbq3tzieZtznzoDlpBHUfGw8yu7Ax68/lAgiK+PCadphkYu7JGrNF4
j5TMVaD9sCV6YXFUdYEQ3KA2Nhh774dXkSZWbzB6XB3sBTUDK6lEAX4cN8XeoxMrdpy8G0hl2mAD
33iwuscxgzgSUsLhG8+WxAhCCdHPQg2nQMPaPlYXdst1okqf0GEq2SrI19exxepbGyuWCkIJHHQ7
YV/lzYhNSxHDodEgPtz8MBpVSWqgfEkQqQ7eNVpfwzGE8SoX1DRVzhsDVSqlOn3qRAyrXp97pPhy
kSU6NQYVRuQ1HE+gUDcX6itXGbBHFyMoZ+KAxGyn6Fx492eYXbMXtt7bI1vTSY/h3EUFPvgtGdYy
pSI4xNUTFQJ5D+vrWfvw4oEKuw7ZDlTcXV3I53mCA/RRKq4vzhDeNuKeBs/rJVpdEMXQ/CgYu6dT
AxYZdSLEzm2kVvyWdeJadgbFpLewtjmI1SRA3oSH7jlQ97HrqklY7wYIlFuNbfaA3vjxtzZEFGo4
/X6PhdSfXuJ2/UKWJp3B8pT6BAofN8NurPHDLKJbAXfzPqAu6V/VXMHsBu+FfcLGtz9X93Es5f33
kfFD8hgJ4g+EbuzoWLG5x8DOSbPC6LRP0ZzzExg5nrehv8mvBJrnTzTVenaDXsEfsGXcn2Cmk462
2+3lQR1e8nVKr8kG9jO6YTUzzIpHvVhDfeRK7HAvC8zZ+VrD8pL41M52irfwn+cJMCoqBHwgqvpY
S0/wvVYKWd6aX7F8CRVY12BCIjk73rfeG4wO3EidZitFJG+iGbw01KFV2N7BuufPBO5R8SRiK0rR
HO9DQcYhcEnz8K7VwqaohhoNML7f2EMnRFIlKeFPGkU78xUx5TiW+0UxMXWybvYovZ4ghCfUU1z4
R31h+L5Kd2VnYmMwi2rdvRkC2/VEEUBR53VVo8fg9mwnxBM7S+mNaQbQzssLBzfTYXxlhURmpmMi
iTl1Nas0KPYwXgTs99kJrEPfJbB1o5nsvZgxqjyODRwz4Y1xuCsqZu0/MdSXlOJDXTTReNQYgc0G
mVRxT43ODvBcQysTW+wIHyXlvTcw4X3CCsbH4zNtrRIjEGeZRfNSvkVD/u4h0D7dhv6+z64yiALG
g7ojaapC9u13+0995dNr0lm8cSX81oOazvxEbM3CHFqHwx4f9I8d8Xn6LAH/hiJGFOTRrKEwh6+5
pGjPvVq2dAnpgJCPE8WdWuvr8TrYcG1djxq1Uno0xE4DuSnQqHK+P9OZdyINXiTcU1WTr188NRpA
SvQhUbM46dxEzQjFEN2xtb+bwzIddQFeayT+9mtFL8aFwI3CDHqQy6PH1PvjBA/x60nt4DFE6/JQ
QzkWXws12FuvFlFyBHi75CG2LVMBs1h/aniiukaD81EAq35NOGgL7xqRYC6GZbjYNvykZksPaal4
fF91OfhQaGANL8dhed7GE7RmppC1nztvWcwrBPvd6YJmL917ZOuAGYAV5EQ2D/u0171pheGcf9CK
blM0tfZRgm3SqOR9NI8DcVjfAJyfHKqnflvN7hpCGPI6Tw+t9QIr4oYVTkJVIjZslYEFxcMGJEkl
InbZ6w8egq357tD0wBBMPz6S1NHA0eVcRGuViRC6EV5x8Ga113nGNIMzlDqyGgPzvv13BrHV9Oij
bD8V2Qx7CY7sssHGMIcDEx73HsRfoBI2i1Ot56zrYGhSgrjdZY2YHvcKnELFxd/1HpYWjivw4ntM
M3OzTynadhpsCt7CyPHv0ZzUNgTHj5/TpEbMmz9SH++cej9gm09aNitHjwAx9O9oSd962sWvzSiV
BwsgTksoG5eTzIGLFV+pfXUPA5O4QQIhr/LY9wfGVisQFWBQypO5fGfVFI/rBjrTPcBO5DVs1OpP
AUcyL0iGKIqYVYWh9MVXfGLY06f3K0f7qxIX2HatNJrbdWng0G0CbD5XpncX/IEw0NUaccH2ODD7
mUIQeTcZ7fg6qqaMHyXohPHzD5/OhxkRsJXmI1ZXZa0o9d4i/NaPT83xoK/XUXMhmrSFbMOdMsw/
fG6v3Uy1scMVpZWiyU+0ztSQl5dHw/eph4tYMcL5Vh+t2jRIYHHxiI1oJ4JOYI7462eqZOiUdtXc
nkF9E3Sqq9IwsCx2MnjX5yvW46muxvObKbBXxSONK7vSJ2jONjRmzSL8SEV9xA/VhIkkRAS8H96w
1PcpA473VDHeNe+K/N5/bKKAcLpj6BTitYCdkjTYfPXSMO0yW4ObS//CZvayBjZ0TQmlLsiIXPld
RL/6AWZMyLDyrDV97EdXk0zNb9CyHDfR1POyD1QMVWpzQzSwDng2fMrmDgGcpfqs3bgc1P2MsZ7D
OJqtvV3I4zU84/uniXX2VtcZ9BH3wP6zOaQrhucS5O+zRZ2bM+k0cA0Tvvn8SA9H/gDWbcvX8J6G
HFbF7BUtwactf/WQZYnLoX5fKw22ihsRiR8yne1HYMPEaQBWQGBX8/P+MOAeCg11UykDZJY3Z5iY
Q4+uqWOkwvZScMDbIAOjA5iGgeDeh57xcKnOobVi4f3dQN4vEQ2c9BORw/pKIHzhN9rY5Sdi+Z0X
Ydmeb9i/bR3wURa8AV6xOxC54/JqBgAIEHRPSl1zOui0uYQ1HOdx8+MrsNAxuQGdPkrEve5Nuvbe
pQBfvKCPr/5eP/o4S9v0eaBKEveA+LrvwvvtUlG3m2JvaWG9QtbeciQcTk/A5P0rgXO6f2OHVq+h
yAq/B189g5XLrmXzPbv58Bw55Ps8F6zdXRb+zTc3rR5IRQ8EHg+xifHySbwl1g0TQuGgYUN3an2U
NtQEL8FSyGcjnnQ29+IMC6zrhB+aGixi2RLI7/YGPpyBMdAksDpgEYFRM4Rvth6yMIYTE0saRHY1
MKu6hZLIGQsCvker+fDYQ8lu6w/VlzZm882WEqnPTx5hnl2yD85u4c9fkPotflLCULcBeph12CZn
RydfvgHf/YEqa13BUj4+M6zqIaHaJfe9z+vcwp8+J9wtVaL1DoYeDldNpZ4aymzEB7IBRkgA6fjE
YutbGxrYXUKd2sM0pbO0aU1on6hPhn74iu/5icCS5Q6RKooBy2I1hwR2V6wl7dNbPJ3bwNTZIaqO
9pCu55DV8JFcObIZ+5XNTt/5QFKJgZg48QPLAafsR+22I7A7S+k8Pl4j+OojspPFTzULmcjB42tU
8RGVd71/a1UNr6+oRRvzLVckNIIz2I1nBck0lbx5oywJjNAZYf+zauAPn/300iGaAm++h5UNK5jb
SPIvaTRbU5RJj6Q/oM0J68N6GuwNJCgOkawEqzfX+5cNY15cqHNctWqKHNDB08MQMDpbnsfO85aT
ZvWxw7/9tu6vibhvxzKgf9bjcn+4UHF4n6z6Ix0YBsMIBdZ42NkrPVuG4ejD0K9Dqu+euTfIE3cG
ennjcDB/jgM7r7YEskBZ0fvmhyl98UYOn+XHQRy+XQBr23cMj/f9ierZRWLL3OUmvKw0QJfw+kiX
5SGZ8Osv0XsD+HRh+DjLZrys2HlZkff++mcQ5mWGZPQpBtrHxxnOVnynNqdWw5qK801OdkDD6FjQ
dOqadAQWG49Iun2WiHXmHMLbxdvSb/9X0y0/dUCkOJp21/OSsr7gV0jXDNLk9jlG6+4NENiNKiVC
j1SwSJdbBjvutKFGFNjsq39cGO1MA2txoESrs0E12Fy6F/75HbqStw+uDS9Sa06LYdoqSi1/9x+1
PheVMVrZClCv+ZOwy2ZOWZRXBkRchREI+7ViZYTiHz9SfdrZ3sxfFENm44Z+++lRsZYlCeD0+U6t
YkmGxXbrAmruZcWe79GhwXObQ241CzK/fTHq/RFocCBlTrGMAGvF3dPe82dt/PInHb7PuwFTfqc4
OPapPtNXbIPEL3yKFrHXGdkfDTloAKTOKVurlYuPREqbwMG4YeOw3BvvBpFe20RCMGbT67JX4Ho6
Eaot1jqsUhQ3MMXpk2y+zxtT4TFKm7aZENF4wqgjP2agP14x2itbZxjyuyzBZxwhjE+0ZeuWlZ30
rgKOlHzDD7/1AnqZcKT86s353kAffOjAqA82hTfrsYqgKRj/xvPxYD4FgPTGJltX4sD4UsIVfnZZ
ifb5bFfzZIU9JN5tiw1p0KJpnU0B2kJbE+B7uFqs4LrC7NhuaeBF8zDVO3qCJzu4U8/nt8O0KeTu
p6+RHOnPlGwKX4C/vGa/1tBbWb1qsK73E4KuIeqE3Lz5p48xyvNLumg3r4OX2Qi++Cow4h7HHGaj
OlF0wtVQt45G4Ddf+vqNLqLuul9h6s9Pqu0I0RkXiDYkXrIl/JSP0azVnxL8+Nq3pmCg+WxuwOMz
KEhSnxMbQ+CKEumJRzVsqQPf2kdR0B9VTOZiuqQscH0TbPMkxqga/YqxWvXhZOOUqvdHzwZu2ENp
fV4y6mUbu5pHts7wMTbHP3yy8N+A4J77B2y5sZculXUb/9Rjl7RNSSQ/Rohv95ZasugMw2jtc7BZ
tYzs5lQZOE0QTuCXb1n6VgG74qp14HZxtkjgWVUtt9O7gRvo7b/5yNlj3aH1oSA6V6xEoe8tp8Ig
EuoPBWrHOIsW3HAhBIV8Io10aoYZHxoIB0m8Y91R+nQtnciWo+Qu4r6W23SV/VaB1uHm4sc3H5sP
5pOTrOerxQFQABs2/GSAr1/Dpoe9lEr5iYNizd5kd9jZEalvsIPfvJDw2/4zzLtDTqCyvjVqRrns
sWGzrUHUriJ6zS1Kp4ejrXAUIoyt7XEC/UXqczh57ojeQnaKFmWxIDy9NmfqxosD5vNSGLCp5hs2
bqmS7qqmtqEvtjya7f2LzeCxKvJP7zmHfKz+5D2lu3lQt4yHYUROdYJvPjv+wUvyLqYE7lvZIez8
9r1VewkC/JjYRfL9SfRFcoYbdPP+je3VuDJCSMbBl/x+kOG0Dt4aC8QGJj6eqfuwuWjdsr6HPCcz
jDSeALJlfQfTnAzUmg7zsFy5isAuEc5U4bfHiGRegKR3VRyoqc12RGHjNZBahkk2eX6JaFBcbMja
JEdC2zY6u/JyDMVHZOFDc5sqcqmPifzLR4zjRR2E17NG4JFcOMQ/T4gNRrdZJW7CGtlv2yZdDw2M
gaxnGuGduGQL2ys9pJE3E3h48/o6n60cKP78oJZx34K5avSzHHAniIT1EVWrYC0J9Nbl+O987pfv
ZBjq2Jo4Aywm0DYwAMv9h3/V+IpfAoxgd6GHfhdW7NDoBTgMN0jtj7kwcrwOLtAWLyOPca8Btn8h
EzwOc4fN4iik4xF1K7z1akY17lEOsyqtNdzudgpF0LCq73qcwbrpO4w+91b/7o8afPEC8Zq3ZdRx
XHFvOP6DeupF05lVWj68EFehimoQ8Cdvha1p/fDf+6PvV3cNqRlzk0c9IudQvBKBOnvFBeN0nxIA
XeIjTmhyfT5YnQR//PDrT07e27X01fNf/Ms8Jl+r8I9f9rtzkk48b5wlphcmtTe3AEhqYppwb28Q
eXngkVKiyzbYOsX+u/5Pxm5MM0HT3G/Y9slVX/NV4WBiaD21T4M2LLV+TsBW1a+kEbmNTi/Eb+A3
3yKNW1x1FqtlBjMQ77HHPjZgMMmLn98iixZZ0eLvQwkeBz5Hb79FlTA7Qwb2vZjg4HTH6dy2RvHz
ixgZehPR4LK3QXQQRnSvsFetoIHJn3zH5Qf477wfXYozDsfTTie8YvegiF8vqo95xWbOR6EUjHP+
p9+Zm157+M2bcHBtm2HW6lfx4wMCUjVj7MUbGYyflkUNd38CX39W/PQqNgUYDxNBtwYK53CLv/1f
ffX1CjXQulQTtnswajcug1Z7jqkOzkd9Nt8lkb55CQ1GF1Vrcn+X8MSkAmMUGmDBYYXkoYMBVp+X
Vf94OtwAo/xY37xQjXj9bMWgIDHG6ubcDaQDAMLhbMxUUZmuE/2aCNI8+AvGdDlVHD/cXbiOJCO7
qrNS5mvRGXYN2Pwbr9bQrWHwvOfYZmURDddo54OttB7J/uZM3neecQZke7z++CRaCTlzMH/HFjJv
Wl1NBbJD8IoZR1VU3j2KrRsHF/HFCKcOacVu7UOB59jYEt6wh4FtIwLB2x9T+usn7mBehZ+fQEsg
fvQ/fHW1OPnrD9+ekOhZDgNdr7HhW260s6rwBOx9yX/5m09pFaYGfCTdgSrfPGVtu9wFwbjmRNJQ
6X35xYXXvXHB5xEzNnqSQwCNDyp1hE+RjpckgBDGTKBKzgeMbR13s7+oq0k9fXdP2aHxCni/kIo6
aSJWS0jeyj50ui395qdsXleFQP0Djth+vEJvjZEd/5mHAG/0U25zdXP409Nofr7BEngTARvk6tj2
3jqb/ZI28Ps+VFx2LZipOJ9gHwIfW1DR/sx3ZF7iLvRSF03ajOvdBaTefci2McRoiVhfwOvevOBA
ij0262MSgqO89NRx2+ew6GccA+8ouFSLgyJa3PlqgvBskD/zheXrzyX+rIx/5gtdbQR/9BzWw/nu
jec3UH7fk/qnCuhEstdG/s5LfvkBm/mLbQJO2dlUHT6dN2eXroc1Ss7UHXVTpwk9zLB/bCU0SLQd
xjXU6l/eSf2lXKrxZq83Ob/KFtrMzwPgzHdP4FU5FwjwrzmdQjJpgD8eUmyUepwuBVJOstJgiyor
LofdZb/voCVcj1j1gJx+8z5NynenCIFleQ2DcBwb8PVf1Pvm/eNhNslvPoLV95gNM7C6DaR8ElBU
S7VO5jPOwLmHGVZkzoiWGqvZrx6sLAVJv/MqDlbKohH5fIzB0igfG26QrdNLplbRPFEhBz88BXfx
w1YV9yYsicAToG5P+vjbj++i5r7zPNGb7PhYy8W6z8gbI/dPXgRvC+fRZ6ca+jolWw1883eypVoe
sfCd9NA5yU+Kglmpdms4JHuY9Tfsm5t7Shdb6qHjPVT6+FQr+Pz8JRC/XfrF/z/5tRQfQ6ojpFb8
+i4FaGbdGStfPOieaG8CDbxdHIz7Evz0mSRcUUjKbaV6/OtMN8A1/N2Xz0qdgmXu5EUxMLa3Vum9
el5GEu3UAPvnwgQfUXI4iAsUY2Pc2Pq6As+EQYsC1GBCopl/jsYvv6L27IfVgtdPDDUFXbDZiAsb
/WrugLwPG2yUaz9MWiWOf/ZLt3fjgb2FQ7c/SMTG7lx+9JXtzBl+uOqI5oExQDh37OHxfFGJwPwW
LJJTJbJb1CvNv/7nm1f4MC9vwne+W6bsFEvn3++xkfFbj5GXAGFwCVUaypyRLpvNNv/x30//DMQM
qxMYWYfxk2qb9Dc/A48zq8jwzW8moRk1uGseOfZdNkbjPlZPoNY3DckyKg5UUu4a5DvJwb5qnjy2
e2sJ9O7nFqei7Xtr7O9CuT5tX188GdP5AMD6q5ca7XGqlnuj3+CnbA/Y2EPire/xlIPUX59fPFfS
OSuMDl7U2aTaN0+fc0uJ//SPfidbfX0So4T31cxxYHJBtZj0Iv6Zz6pqzfTFS0wNtmf6RGJJ22hh
e7uHhoMeSCpyIxX6q1uDZ9QhwuLJGPhhuCN4d+IOSb2ZpH2ZttJPL1KsDbpHIziKv/pR/XZ4nfm3
MYGynmvUu4sOWKwd6n/4SP0tIX/4EP79OxXwn//466//8zth0HSP/P09GDDly/TP/zoq8M/kkfyT
44R/UuHPSQQyJkX+97/+fQjh78/QNZ/p/05dnbfj3//6S/xz2uDvqZuS93+7/I/vu/7zH/8PAAD/
/wMAVLQZa+EgAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7be211ab1afd169e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 26 Apr 2023 21:55:16 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '172'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 5b810c395d48248e473bc4e53de0eafa
status:
code: 200
message: OK
- request:
body: '{"input": ["Sample text 1"], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '92'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x
ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0
Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071
gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq
P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl
ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0
HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN
SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5
yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M
G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf
+fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI
dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv
pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA
S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF
o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx
WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0
jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt
fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp
3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG
MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB
/v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0
DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK
fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps
HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz
mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd
PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl
zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN
nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7
6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW
tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep
aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj
7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB
jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu
KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM
/uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7
zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO
fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB
vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k
VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD
GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U
fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU
p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai
cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8
Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP
ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi
MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf
uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg
CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC
tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX
baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ
qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ
ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf
zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq
pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89
nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB
4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr
Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/
l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt
NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f
bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8
jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl
74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK
1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY
mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy
rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn
eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn
IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq
uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71
j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst
6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K
H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19
/awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl
s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f
WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK
mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ
7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk
tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju
TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO
urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M
dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C
b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G
5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF
bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f
ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc
614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e
mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86
iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ
G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR
aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO
mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm
AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M
H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr
T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz
iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4
5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj
l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81
NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k
Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf
D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii
7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh
JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3
01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy
4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H
BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T
bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n
4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+
6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ
QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2
4SAAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7be211ac8cdd169e-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Wed, 26 Apr 2023 21:55:16 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '114'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- d39b785f954e1f1cc64746735c44b522
status:
code: 200
message: OK
- request:
body: '{"input": [[18031, 1495, 220, 16]], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '99'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaS6+zOrel+9+v2Nrd1KcQQvDM7nELECB2EnItlUpACOEWrjbgo/PfS+Q9OlXV
WQ0WS3jZnmM+Y9j/8a+//vq7CrM46v/+56+/i7Tr//4f87NX0Ad///PX//zXX3/99dd//H7+f2/G
ZRi/Xuk3+b3++2X6fcXj3//8Jfz3k//70j9//b0V0oz2TVyG/Rt/ZNCN/sr2Me7CatyjC9C+y5mX
y43OQnuxAIvnCnstoOdMy07C9hEVhJkt2odNlyJNVtPEIEoy1f743G1jZLXFlxxOSA8nvUg1eOgL
D6/UpEFVYO8qqKnns11X2uEU90xB2Ft4zPyMSGcv90Ihft0eeDxtwR3erNbgvRZXmEq31O0M8w5I
JkRhxjHIEe8+Y7d9RWbIdlbj+NPo3A2Quk4le1l0UBc5bQCvLtjjplK3qPWm5xE1kkGYHTeWz+3O
yWFydYrlGuxwuiw+JoTHRcZ2TVRl6Td8OhAk2oUYtvvRJ+kSd4jcHcRUPbfCro6bBnxjonSpJE04
tCqWUXROMbOc3spYFY0ShI+FQry1v8jGYv2IUOccQ+YIx2M4Tu7FRAGx9ng7VPeQuyf7CJcSZKJc
4jCkyzhQQLqEiLkv7x32RskqSDaPhOzS85ixM9I0VHmKRZ6SmIaDH0kNdHvoyQGg8CfxNFzg2wYt
sfB0Caf2k9sw+ReZWXe15MMldkVkyreCkbF1EdWN6w1sj+gMq2qC+nCfmtsXdEDw/rjiUzLYkYxF
3GGBWY9sqCK7hGp1/hLvEqUtX/tPE/L11SRe3GLe093uAfhauhSx4hSO6Gk04G/ZG6/XdNIn7g3J
dp5fpmwDi0+X+JujJOFftrdKMRyXb/8GYV2UdHVdqLoYWE8JTvtaJkZQn3Q+uSiF9zfV5vlC4bgR
8w7sbpWzgzC926HcVBWoaikTbJk5olZTi0BisSE7NWn4KBC7g2TcXdkeENG5nlcUkmo0iKkbj5Cr
KTkix7oBXlzflc/Kpw1QqkTDqA6VbHSPiQif9H5m1nlN+HjX3x36jU87RZnLyfr1AMaOhLz7j8BH
fE0aeFzyHi8sU3RHvbjEEPeLDVGOzyWaKmhsKJ/ZmR0ih3C+eD06cEfxQtSpydGkfrYBaK6tsVhV
Fc73/XWA9Vk0yb56We3gHO8lJMmQ4rpLM7c5+JcBmnd0ZF61vPHaYZEBBCdA9M/lGw5xr5+XU7uQ
fvvRr4aqOUN2WXvEWJ0sfdUohxy2vqQRUkeV2x8fWgfJ8bVhujfmYf2muYbe30RjhH22s36sG/hm
jz3Trh/G+3qbxZs+j3Z46M0NYqwcE5CxfGAEraKMZivlCNwo15jN9UPH5lGCflV12i43Q8bHhk5w
501M5Wx68knTzQgu8QWRpziysJNW7gLtd+OJ4ckq+dS8NQPOYebP+mCgIe7dMyruyy87VDHT6WlT
5CCevpgQLTmj+pHnDxQeISNeuVrr1Uk+drJbe1fiFjfR791RtGXnXjXE2Nx8LsjrD90GZLfHWyIe
/eEbjBTmeifa9UMQx6KGwZFOLp3IOfPH3XfyUH9x7kTvs1bnlrMpIV7M9dIbk8/ITTFBoFPC9C0U
fOLdVIGiqISYhViEw8M4BttPc2PEXTPb56wcU8jXF5Xdj2sr5CtW5tAL45aZ1l5r1ytS2UALZtFl
/8naSjwdJWgsd4dHWViEdIsmE+J+uaCLd73Rqf86mlAZWkOceyxnY5d+HOi044Io62OlTwK5yRC/
WhcvV8PXn9zjRKG+Sjr76f3gHN85fM7am3nOQWq7LdTpn/Fg103C6aefwyMT2KwHqLe7SZPDsTDw
8m0nLsUroQL340lErZLCp2hz1GCXxA1RDCdyuy30IlzW54p5zJBR4w5SCoF0ydm837OBlkayrV+b
hpHi7mUUbaIbuu33Jt0cVqeW7ftnBMiOQ2YefN4OZ9RK6Js9Me3d7hnyNzk0m76Lv8wtbjt3/TSP
JTRSqGGx2yltx6znGY7jpWDEKAo0nuRSgq829Xg1tBnqfnpLzDRgankrWp6exRLC2pcwum3PfBwb
V0OhUq3w4nA02sZhnwbm/kGjRv74g75TYjDle8P2Ec4Qc0/2GV764OLt7rsK2c4ORfkl0IA5Ltm1
6zfZ3+Sz4BywKF88NIz7dYXMVLvO+1nNRuniCSDa4JDL7W3zyX89jM2oRhFxN8IWdc+ifqCcKStC
vveM05RvKTiWisk+MK5hf1m8BfTbj/YleerT4dJW6L0WVsR9e2LGX+05AV58TEKOm8Kd+tzEaOvm
Ijv7kZt1j/wmwnMxhhj4/uYPYX2+wOPynZhGjTMa8bWq5HMmvglZD4z/9Af+jGeu//YodWfZIfpA
L7nk+d3yrUaQm6bBcMYHd0DSkKO2vp/IzA++cNyoAizvekqHZJKzCklDiSpPs4gpP59ZubnWRxj3
0YKQNhqyuf85IK2UMwn03ZL3xfoRgxC/1mRXRYk+bmFNgS6UM9H8CHi/EasHYNHrCOFt5tLL4mOA
Oq2ALs0m1PubZmMQn3n72+/+9CwVEZhZnXCyPj79wfbKCk7N8USiZFD5cPsMMZymMsVTu1zy8hzt
vd96M5VwVx+W730MpxOo7DJ6bdsno2lCFxSYShEDPsrrtQxUvFfEcw6Ptm4SqwPfi154MXl+xhev
l4LOgn1gh4P4RaOqCTnUi/UGb+7L0c3iwxTD3elGds3vVz6qHyag52vlMWVzfWXd3M9l9j1OTK2S
WzucgkACTQi/RMsGh4+77yJFSrKM5/FEaHzkiQHZ1ZaJwb0UTW3yOkNRHY/kWt5798/6irX0pOW8
HnwL+gLk++dEzEop9NZyVjK6Vt4aC+cgzrjxdQY08w6F3vi4X94r3lZVv0ss+o/SZ+7JKOGLuicj
t8VeH2lhJdvNoTqS8ChRTu1WogjQWafFzJeMmYiis+Ce2T7a2z4fa7+EUn59Zj5YttU3GDvohUEi
6q6O+OBNyIH4WtVMlYQV6t1jJaJw/BrM3txDf4jpLQIviM8EyzfnD89Bdy47Zh/DLuzxNYcf79KN
+lHdNZJ8DdzjxsEbqyz0aqw3NnTnYmRqGU7+9NPjmU/oamh1Poz1Kob5/8EC0176cLxLAiiPyibK
Lf74VKBqgtbntUsTphr+lPqohG6/6Gc9lrN+fR4rlF1Ei3jpELZDIZ0mVJDqRFyz9tvxWfDjH57U
sViFw+Q6GOVx+GSO/3y7QxUpJfTh2mXKJXbcUTw1E9BSW9HWqKNwbD+3Eha7vfGnf3KjQDKKD1pD
Dm5X6ZPl5AoE395mdsLalsl3R4PbZHK8kldC21TQODDvL6K/yMvlb6bHgMXDSBRmnvQJ5AxDpOGE
LuZ6Gh3yBFR5ukflZ1W7dGqzCcxGW1MxG1W3D+x3Kn/Oas4IFhI+xDSO5ePmarH9SY7Dmbcq8M7x
Co9WLei8ywwZiTK26Cc9hf5UrNMLGuUOYfGe4mwE+VXJd40SPIzuKqSltMFo3A8dOQj9xGlYiwYE
lSLi6pJ8EU8nyYD6UK7/1O8Y2O8EhKWB2eGxy3SeHKMUXvrk0mnx6rMuPuxiMJ9sT1FfBPqYXaQS
eJ9mzEj5Jhy1/FCioFIRXT71g88H2ynRrB9Me5ZtOIpjVcKzfLnEuLyTkF+VvQwPd0GZM7gXNEFQ
G+jHB972kfjjRkwCaMc2xZvhS9BglNdh80kvB2Lu9tBOOxsf0UHYCmRPVhWf9fqGdknUkPtye0XV
5DoeSgHbZBfYe59Obl0hcfIapoyu4Qq/+tbAOGH0tD4he2RSBcjmmBxi5xGOhll7IOU0x8LMk9P1
Ldto5gOMLtsmzNW0SuGQXzBRbhl2h199WFzmbH9d2m0/tbEH7LY1iHvXaFjZXWUj89N9CenLYzjQ
UvHgKfIFleb1mFbHOoajYlTMQlIQ/qnPKEgHcuiyJuPZ9aEAuQk2s9Fd9Yefn3SP8g73KeG8yO+3
BXrp8YsmyRS0Ne8KE4S1OWL+dqx2eO7aEtTTpsddOqCM3dTHBME9uOJ1caWIm3WVgua/H1S8v1Q0
KIpSbU/PbUW8ZrkMO/F09SDrXw51LAeFlZrmAoQbmTDHchp3UpTE2L5vUkm822eN6BvzAZrBmP7w
ItfzpINZf//0m8ZqPgIsv/cjXaIVtFN6rgK4PSOBXa+Lsz/peWgjL4jOTHUpafk18RT5lQcd21Wf
WB+1fH8GfVXnRPsMms/7PBxQOEoTca/ajnf7nt/QZldLbHddfFze6dqwvWvsQjdD1/r81RUU3VGt
0FV6VsIJr4Yb+N7k0tXUdj43606El9C/mSb6YtuN+2UF5jO44SLQD3y0WhajKq9MhvduMPPiyYG6
THJyiBzG6+uy7NAYGMEffuoPl+cE7ecqMC8ZPohKwhGDeyLbP38/Ro4EqKaZSexj+MrG0dl5f75v
7vatPy6XJ3v76wdzf+TDZ7IeMOsLXm+kE/rDs7NfYZqAPz4Xj+gC+a1vmAPBN+TfkN3kMbDedP05
cp9aDbXRbpgeVDDcyWeRoyYgKstp7meHOV/Af/iZON/1Kqx/439svZZYfa6GK7xiJlJspaMcjx7i
SHpfNrNfZGFxK9y5fjAMivcmeO/KPq1fyNsYa8+Z9+8nbMn6GkCoNCs8uYeNTu9aYELqLI+4EY/f
rLtnbgPukwRUXDxtf5j3G7q3s19d8wVn7ugEMOc1M/8WLpcE+vjDY0ZEc3euPwy9mz7oUK4P/zXe
3/d30WHBqx8f3FQnJ2pgfRCN+6aDxSLM6OIUxPp4XTYBmM9nyHZEOLfD4iXKKOvjHR53NfBGoPsU
Fovd6sf3nIvDxv75U1rN9bmaeQfNvMBs75TP/NU/JA89PniNJNlnxX0yQA2wREXdkMLpwIMc9jQk
eOYbNCZskuGe3Vd49KUyo/ZBHeBJ44o5xdrjdRMPGGXFq2d6G5d8LNaXSQ4/5xM7zHnX6JD4KM98
hLnTJe6U8vHxJy/AL9q51awfcErPAlNjD7fTyztREEwnpWMTftyJmaWNDt1tmPOWhzv2GdyQ/E4o
wcKpzKZnaYvwFKcN2zevozuo2leBmV+I9XLLlleoEv7kY1g8MX1wmONBl8oBs/GtcodOew2QHGOB
2KlA2ukzpcm2PhRAisDeh0Ng7y8oFn1n5uXnzIfHDk4yWmO+mjCnuehHqCD1bTacNOyS42RAq1BM
1+c1QcXcDyH4MptWhtW7/OUeg+0lrunsV3do9ZufFMiBNpYThnR07uavXggxmilj6NmZaHGNJ3y9
L+pfXpSDheQj209fyx2execBfv/oyN6XjXA918talddPvNDV2p/iQ5Jv1619I7jdWvpIy1sAxX2R
0q3QT6jnXrqAA9QZ0d/OA7HlgmpAWGTSz32buj0rAxOW62pB7NEu/dnPSr/+QnaBfkB0dM7DdlTj
CKfwuoXjN7QkuFZJwch5s+CTvz1jiPvtghj4mrR0zqsgaoSY3Xb7NhxzUZ39/KljP75i2WXvyL3A
t3QVH0S979Iihra+eczKrmu/vyzuAhJr+YmHqO/5j59AZqsPcT2uubQCGsMquXnMQDcIp0vMcnnO
K/FiZ8aoDquThnYP50mXL7tCI90dAjkzspg+H4bHh+Y9pkDe7gtfluGaD298VH55xpy3PttJupwf
21lviKIoPp9+fLNu33s6BpbKp8XLimDbHe50Nc8PNyxjAe3nwmjdpYJPn+XBQeT7omznb7A+sK8f
gAahwQ7idOMs+H5ypGWjiUHTj+H0OV1FcI/SbvZTERqWS7eEfRn0zNY0e85nrByWyypkEXpUnEWN
XsmBZXyJs9uZOpeE8iGb8qUg3jmKfPqZSkGWmRhh2X/2vHOO9xzyr+IzVdqMPj1cvgsUvB9ryrYy
a/lBWFdo+b25TDucPXfcfWUMeHd+MVd5p5xBIC8gK949lWxvwzndVSlIhmUzd7nwXFHAege3D9/R
IZ2SlgdWOyBsLBjBrquEs5+LZPuSErbbM9yi+CDHKFH2Jv69T1mZAXRBuWdeuSr46LCLieSvsCQ/
P8FXrDmjYLhbs94c0HAKzjLsd/xEn7uy573lfrTtrH+YsU+nj636sGHOswjJzlU4sG/4AMXWOrqJ
8ErvvRNoaL+gWzzMPL1qEkJlvPD27OCjrdu/OnxE7icp8ZQNoBfyfRNBH65cmnaG4w9BRSa0zb0V
89RFlvH0vMhBSeBJ7hvpxPldyxYQof1p9oc7f8BX30DO+/Zku7m+Z32hkF33y7n/btvx54cGm6gU
ae/WHyVBq375Kk6z1bqle3oekHFYMOK8+97tfvtj5klix42UdeLpIcNLj15MvyrPcPit90Hv10z7
DH7Gm+VzQrM/ZVq2Wmf0c3qmKJCuNZYuydPldJekW1Q/2bx/Wref/aJsrA+E7R72pPOXxyREd2ZJ
XC0veccP3RE2iyKnm/Khh/QUiBU6w16g5ewP1s8yoLB7i/bcL2/h7BdttKcP5+evMnZVVAk2u4bj
2a+jYs5PkZR3OXOP27AdA/ueoP3NXJKfHrCxaSJQHo1N61351me9vqHCajeMrAeCRrTBw+97WHjk
vU4FEkuwZLces1BfIT7nfSjZPAs8Ds0hEyKnfUjLu5rS4yUO/SmqxwFo0WnMfhRZuDrJaQn9ZR8R
S8k2/q+foUayLgS/qdCyrxwfoXm/r/hT3kRO40Mnozk/YcrqOCAuTt4NBOwaxHa80e3v2StBW1/W
mJ1vtLbEVzkBRdGc+XuXjLFyK6D4dXmwOU92p1wMI+h9sZ75oc9GJYkqNIHf0lm4M2q3W01ORutK
TGWpuzyspAhdrfKGl+fNoRU7bZDBChYq1dfTOqQrci7/8JQDgeWP+T1ewJ1XMRbDb+MO5FaD7EhH
k/jhznPHgxDKsAWRzzyr6iwV9DO8cRFQHu48nZ82ywjUFN8Imf3X1CbXIyx6rjE3O03udLhkDehX
XWd7tx35aJgfDMtlExIjl7pwMkxkyrLk9HgjdmnGdtXpj/8kWFf3IQ/tBfx4m7l3DYczL6bbXl9R
5o7NKus1Q49APUkVC/xnj8ab5qVQXW4PdpDWe9TL66UEwvrxnvOjN++/IZHQzkq3zGyUmDN8Gb3t
XuwTvAJZQpz3XgpWsDUJplrr//J2ac6TiNuGtVvssW5u5/n+k1+uDcsDuJhKS7SwTNtJ0x0bzf1+
9uejW7w8w4Elph7TvjLXO4lLE+q08wLnpiuj+o1rGTnvxmJ/8orP6SqAQ+7xnGcabntTrwaUz8+Z
uH0hu2yo7AeAc11QqVqwkHWZIYFskYzhZ57xoV5Ukazl1oYuzQL70z1jwS8PYfvykfl96pfz+SJ4
tHx+P20X9/pxu6tii27F3tGn5bvHv/MY5hm27k/LZauhP/5MvsT6EJTSGbYg8Nl/IZ/vqmcl42vu
kr3bnlD1y39n/WNXqwnbxj5gEwqrEmi3fR3csYkHD3pfqJkqMsVfR43ewHx+hYO9o7b0+Die4eBS
gsWD77eTeJIuYD4fNzznle3wLOoAAumW//RZp/eMPcBCZfrLN0Nq7h8C6vUEEasInLDXd8Hll8cQ
/YhefAj3lxTIN6qJQ80hG99ka8BtCkuG/WDl99toVYJb4ytxTs8i7AViU7QjSfBbT737nccqm3M9
84SNBjVNEuC9tyKG6cpzHhBXoBzbAW9i/Mq6y0JtYKsXT7ZfIDPkdaxM2+sgiOyyOn31cT6fRPN5
Mh0qSMLx59/9PhyJeU3X4XzeKIDfP0dmcE/j1GH9AubzT0IcJrY9+/bnTeqASwJqHttJfugXae6P
9HPaRrNef+Ufv5M5L+e0fnEPdI9O9N1GQ9tnF5DhvVzf8ST6t5anEzzQ8q0syVUcNm5/Cqpo+zt/
FF8dDce5P6JkExZ0FNmY8V11BUiSKcVii+qQ72xfhHuo50RV31lYP3ejg3Z3sWWkjjYh7z6iBuFj
u2MHeSqy6eXdNPjtH0y1KJu+6J5DpB0KYqdM0KebqmNAddDgiRqfbGqTxwJ20iDTJW91V/ydhymP
S8C8qDXD1U+vLvEN4c1Jjv3+l/+oTVoy7/Yx25F9izOKuM3oKG82fFj76zPg3Sll+/k8eCrWZYP0
VfVmXiOr4aQolQl//24F/Oe//vrrf/1uGJTVKy7miwF9PPb//u+rAv8OXsG/BUH8NxP/3ESgXZDE
f//zX5cQ/q7bqqz7/91Xefzt/v7nL+nPbYO/+6oPiv/n8b/mb/3nv/4PAAAA//8DAPzVCbHhIAAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7c0b986749b8f9cc-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 01 May 2023 22:52:31 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '18'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 0d93269b5615ee055622aca99d9e4aa9
status:
code: 200
message: OK
- request:
body: '{"input": [[18031, 1495, 220, 17]], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '99'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaWxO6vJbm7/eneOu9ZXYJCCTsO05yNlHxgFNTU4KIICgCCSRd/d2n8L+rp/vG
KgElhJW1nue38h//+Ouvvz9ZXeTj3//66++mGsa//9dy7H4bb3//66///Y+//vrrr//4ff6PK4s2
K+736l3+Lv+drN73Yv77X3+J/3Xk/1/0r7/+Nhu9on7/9KJZFRRREw+dQKPUjAE9rMyjbnrqTPHl
c7CJYWMEP8GzpfZ+7hJi248X9HfvB+Gis+tn15R28BuMChKNDbN57gERpCyKsfPy5Z5sQNvCJPnM
OCQjqom2IzH4qlwgq9C3MqafrCOUQiZR92jTbMgDoQIseDyRnutzP21j5QbTwAWEMfSN5vdOyAEN
DxsamYUSDeo0OzpQCpsGkdMmgwYphPIKvvGmoW7PTvNVgI96Eqj/KDc1l4ptDLL31aGGPab2jIpm
gBEBb0SaY9DzUNghePeeO+pDXiWjwOMKykhqcAD9iz1bp0qBSh1uaOhpVUQlYXfQ2Ls+oDqzKsDS
+XuGVe56FMmt03+79e4INbBvMF5Hc8KfRrKDosJKitw97meJJSmIGnBHsFdLMF8vowI+YVuhefBE
QMbV14BfTdTo3qiPdrcu7ilUhhBjS8BRPeaKeYQ+egc4Eo4cLO9ThoJCMmxcH0rNiNenYGhWLXZf
LzmaLDzkUIsPGeHKS+LTvYgVbSOeZuo8U9TzIrcQvL2SkW4qQnuyzStZX73HBsdvf6inPJArUMM4
wZZzczl73aYDNL4axDZJ9vVwnbUKxpK7I00yXGz2Xpk7OFWnkgYvV4xouvq62r15mwSWyteeTpsy
1kceOnQLKEwGks0t6FbsSK1pq4P5As4hjBRI0PRhhi0aPB9gxvMSI13ec37s7TMM3rTEaGeRfqI2
FQGukgv6PMqmn0/4GMOSrGvsRKdVRM+2d4Zb5N6wTV5GLWv+S4HeqVSplfWnZDrc7RtUBh9jL/y0
YDa7jwLB5jhQy+l0m+SVn0NkIw9vNkBJxtaXQgj6S0+d3frN6bdLJ/BQxBe2XjHMiJI+fPhevSx8
4KOSzLRTz9oqJBPRYTkBNq20GwT17Y4N8y71syx8cribrohugPmsJ/CCKTRc5YCdXAmy+T2blT5C
H9HLt48SAo4qhJDcLbwJs3vPM9ASaEhqjjQvGAHFWhPDTX1LyWrIupq4PTxCr3ja2LWMB5j13kar
qFHvNDZPIR89FgrwPbcpxunVrdePKX1BYwUrjOyn17OT5JXwKVkx/TOfsKUHcMAvY3meHe9Srz/A
IKcGEUUtTuayxBB8uveAgHQMsjn+flMYhENPLUBUTsWzrYBgZm80NeUt4qN5ZHAsnDtRLkRLJns9
yTqyooiQrXmpWXK3X9AXOowvZXevyTwZoZavWpNuGvxM2KPYuqq1cTG1/ZIC+sorCPdx0VG3dHZ8
ErZXpKWW52Krvpc1+/T2AZi+MCIVXtuom078DNQxIki661lGFKWKwa3fVzgQlYCvm3giusQlFynd
/lVPjxApamE1Mg7m9QH8O1+13kTUlyvaQ/Y1W9jGtwZ7WC2TOXCvN9hf1hT/8tU4qnULe1HwqPtw
Wnt27+ILXov4jU3rZWTSoY9C6JSJifE6L7PXZf9wgbFVPHr/CIndR+imgMvBNGg0Jh5Yg2KE4HXd
+uRSdnpNEnHtA7A5D6iLnDabbnexgp+uGdBb2HY2HzdlBctJUjGiGz+RQoJbEM1Qxc5XKZK5jqcK
3tBhRGs5ftcsHYsXYPxF6Ob9qDkPWOTD7FlEdLMZqmgctt0Xkmtg00itH/2k5dyFSfD+UjNtLjaN
taMMnlthR7IlHpZ4HeAVHtJfPgFMfiUyLDhRsJ+iFR/k3ekLdent0DCLdoDpvn6AEZKe1Pv2fTI7
3bz7U6+ConXs+bLqNNhotx3erg0DsMMcvOBmnVnUUTUZzNpXE+HQ6C1qHVb2/LtJfWik2pu6mmdE
cl75BTjqoo0D8Nn3zMT5AdrXcEWkefpEsyVdJkAGdETqWVQAuar9B9AMF0SVL0r06cCWwMvO6hEP
d6M9XLezBiVdi0nnJoeIfLzwC+CnCujmIbxrdumeBhw8IFETNU/AHbUn8OpETyQIngGYw8cdGNYr
QNaZ9gTz2jq7YHlfaBg+sKfzzEQ4ct/BtxmVCd8JCoRrsprxNoJN9tkN+gccn+xD5LjiEbOy7gjk
lfBGz0np7PE7XzXYbk8Cti26+81XC0qDc8LyPOBTNH0+0HY9gpSdwGqe1KEBb7djgANftAHreNyC
axSc6c1K1Z4MyceFxanzMCLiNZnbra+ARzoVtBA3PJrfpxApQTn2OLhs3pyvWdYCvv88kH5HRt9b
QWFoh75WkVw6E6e6pn/AirIzta+HTT/L59wF7+wuYVdxOZ/bzo/BMn8Emk7GSe8lFjQTfYvR9tDW
JD52JWR5yZAcV4k9+8LO0H7nL2IeJqNmkUI93NoS4/aa1bw+Pr8wdOUttsuK8e6y6hTYsNMDiept
3893PzZ+6w1Nx1WSjMQcBCjsvyVZxZ5sT8c9ImD6pnsc7m/MHp7WCUKolQU+jMThfN4yBE3V4kSb
kZGx9XDy4SocJopWLbbp6zNZulYcJoov6jOiiWcRuAcPTua397VZ9MrYL5/iramt+4+bBwpc6jPd
wnIH+if0crDOLZs6wbnvJzgHORyIccHRaf/i1D7VDtTs3Z5mD6uuyToud1BoiUumerdO6Cnbu3Af
FkciCn0Usaq45yBVIxM7WtTwcVKBAN7eIyKqL9p81D3r9e/85r+1fiRPxYL3W/vE0eXpZSwdzy3U
2D4jK2f94eN7JxfwfatyjEPfSsaDF1baC8UfpIBZ4OQX77OfWxQdqwRMg9fHsKuJisSDnCVzil8t
+LbiDge71zmZNkf/pSsb54hPHJ8TNtXcAegU59h5XTc9r/xjCx7uFFDPvIwJIf3Lgpp92FNvd90A
dgw2L/i4TRwv820z2760cM+rhkhX0kStoFoCZGaVkPm+yZM/40Elgti9H30+i8XWgBO8tRS7cp4N
L4scoXPGGjqy0MnWQJgN8BhlB3uRP0Sd5moxFJoupJbdsYRzU6qgO1eY+t3c8WE3mzf4CeoWibnS
JbPbSAo8YTH93Q/061tpQb/pN0R/bQvOxXOkwfuhnmjU2xt72ByNFzzyScf2fg4yHn1vMUiMpkT6
umyjacOlHCz5gl7mwcomeZGDz+9qQ43U+GZ0uOcuLIlUU+OCT4Ah/yVDIsUPpNanRz/9xvfTw4gf
i77Q/EED6I1mHI3Jm0+fdepD4doQ6pdGFPF6d9fgCDYH9HbSVzacgxODdoJcbCz1fp5ULvzRL54+
1pwk4ioE76aCZNBvh4Rf1p8JSnztkjWHL8AHyfvCm7G3cSyHm2jYp+sP0OOKU5s7DZ/V24TgsDYq
aoFH3c8gfE/aBh1ntFYgrbmxvaYa5GVH7at5rplvhEyrOQoIH8Q6+QZTmoO3X3nk5bkfQA5bXwB3
7fPG+NYF9rI+HGAPaIee4ZGBKZmvE1wV9EZxqkbgK6Rr5RdfBMCTkcz8HBF4wK1BzcbV+VilDwtI
NjfIR0+8erqfoxYu9YbauTX2LBHXIfSK2ibd61Lbk/OgLlj0JoEbaRfxNHsWcFOnKcaF8simcDhq
sJMo+tXDbPr2SQ6xnshEpB2rOX0qMVjWN9J7JPX8azqpWpe+QnQj1DJ2/5oTOClaR4Tm2NXTqfuI
kM5HE9+j/px0llqX8PvmHVovemKMivsRqGFsICDZWrbo+wPs8ICwbe6s/k89q0azxDjxYzArqPah
ebICJHdOlix6/agdVl8XzUNqg3noPsLPzyCQP1nE6Hp24ODDmaIjtzh9Gf0H5ptcwlFsRBlLxJWv
RYa0xoZe3aOpq2++WjzIdtGjj4hP5hhCq98gwj2aZbPC+gk24TnE9p19OZOO+xjSZLejbvDMow8f
nBf4hpOII5fue/Z734X9YeitQNyPCXcKGJzGCIGTcQLzlTcIRoN6oObe0Tj3G8GFB1Q7aLeMZ8au
FkIfNQH6rpCUcXv7nPTseOLY9a0taKY9fYGKuDkC8FRmY+w+J7hLbldq4EedTUo85TokDwsjd097
eumzAST+Z4+UG52TKauMHWzMo0H35OMm4xbVDvgqcrOVjefcz2rRMBjwCdJrI++T6ahmLrDlQCSa
jk0wJV2aw0fNBBoes8CeuZC68HU7ONh1mFHzTUBS0OTHB/aNGHIaFhsf8KpRaDijMhvSzfTS6bTf
UP9tmpyvbx8DEAk9yHyXp55tam7Au1fvEEcTS6b9TjjD+GI9qNN2fsSuz8nRN+g8I7463uvpeLoV
QHaVC93o5a1na/9YQsaeDJsWl/s3Ci8VvHhWSSZ7VOx+/9UEeFppBY3Dk5S0Htc/aviUR2r2b5pN
SNGOoLt0dxyX78yenaCNgQX98JffkunHCzznoVNUJKyeza7UtMcw+9heNUPPr16fw2ZOXaIv8U8u
XWdAL64INcSK9UzfyS0c7/bzTz0m5uX+0mZ0A6i/7WlCHtZ2AtJpPiGBbvys3xWjBtVDhLG9md58
mnMTatXKlMinMaSeFcx2QP/UZEJ0eQ84U6EP/MNbpOhQl9HUZ08Ed95Rx57thf0QllgGxzPzicJm
MRt2W4PBi+BkSFlXfs1wPLVQGD4rHK81Kxni9KzBxf8TeZRwvehXBnPxsaLh5jr19GzjM7zE+pWa
s7fKyDKfEPf7J5Kf9SMa4RnKv+uJKNxgNF9WTw0aF6lH7AoUm0SvbILqhA1sb41TxPo++8DVeYqp
4zTrZEx4XMB9dB2p+xRJ1JK6IvClbxmOj+tPTe7fgMH+oZTUlFKSMHfyHXhbnwUCLDwks7ntWnBp
3D01u0sMRty+LfA5vS3EdmCsyR2FueavSLT4ZbNfb4tTqSz+n/BlPJNuxiFw4sMZL7/nnLvmDm6q
S07DMv/W38rtFK12Njn1l/mbtS8ToXSW9xQNUsM5vk1H+BhFB7sBiyLexMoAbXdDKLKf735Ui3GC
J29+UzvcbaPueewKIPrkRn56Vlr0DpiKs0TjXjWANM3sA4a1DhADj7qecy61MLB6beEjx2jC6XsH
axBccLw9xxlXilOrlSfpir7nW15Pd82JoVc+r+Tb623PfCBDmNnpDf/qN4uMxNcXHoff+e2dTZf9
xYGkjkO8q59twsykNAA9bjscdlBLPsDXHRCHuo23cR6ChfeIsI0uHZHXbmATBw8f2GT7msz7Uwc4
GQvyq7803m70jJUED+DImY5aa48ANU4HAsWGYmztwNh3jaIVULwRirp0c6jZcZMaEM7Vkdr6KYwm
KS4d6MS7M3Zq1YikHRMduMU2R9LCP+bgezD0JV9j98SHmptqcoZj4d5pqKEvGO8v+wxRHu9x5Dxo
NBiX+w0+bTUk6vMcR/PlcZbhwg+QdB6JPUssS+Fl7bbYcZpLMtrmoMDZuO7JkHl9xHkj+OCcqwm1
nrmYzHcjJNA/NCK2PzaNhnX+HWBlWAO1PvrU89zjMvSe7Yl6+LNP6A0PTIuhs6FBcAxsktyjFsYb
I/yTTxZe5MPBFXIkL36f5aZ+g92m93GoFGM91NV808V56qnn+mYvi74TgqXeoVlKUdLvLNJqi58l
8iS3PXN7eAa17dtE8YqqnuSpJHByErrEv2RPz/ByAAtPoY6srsCv/ursVOlIyrZJwg8r8wzXiX6g
W3El9mNIvNcfPxmXqQOY4R0E+IT6jUYsYcn4Os4yTD7DhXo02tUT9fgLjJ2j08BNmE3zGoSgsN4y
uRwCO2KfQkDAzfPPH741yNOHQBhcc2qbu6pnH8/6QBREBnX7jVfzj68fwWPPOmx4wttmb00qQdpu
JqTuToY9ty+Yq7/nQeLZsmdvXMd/9FXMAxJ9tvGUwkW/YXup/7wZmtcfvW+HuzEaoaYX8J63Mo21
b5gN22E8gISSGGlVVtgT2PoyPF05pBs/DyN5WH8GbeGhhL3iPOPkVPvQc+46DsNrFhHkv0Tt6UCX
Rmi3BRreteGPP5D++cx7OpmjD043UfnxkHrRWwyoWXDF/mxe7FkVJhGqBH2p2zZWxq731xmA6+VM
+rES7GHrD1+I+GFLury58ElVWAqtzlWxu3aDaDbHIv/5LTK/1l7Np82kQaXfJKgZHqgWHyzbgS8V
bzhuUpz9qa+Xjf7ClkrbhCpVdwQLf0K7lxnxacRDBaydQynKDqCmKHyU8NQqR3zZ+TIf7lX6BaA6
PWngizX/6QVt8bf0x48Yf6wJpORAcbjkJ35XzBKe7xAT5nR3ezqZxxwyUnt0+xGSaPIHp/zxj5+f
W/SF0sIlnvCSz+uprQMGVRWH9Ocvh9/vFz9L/RvdJ5zMVaE9PFpR3xpRMqVNU/z0wR9/wGpUI31o
jC3eBhnn/WjmDCRN59KIvcxkXYC1CxZeiLGz/gDS4Qj+0Z8/f0p6L7N+/gE7tDvUa98N0B9+Dhbe
xkDOd1A89ALiJX9ytlO0En7JXOBtlJd2N+4vMTCJlZB5FYwRi+NPDMxcv+CtYkQ1P5xFCHVQ69vx
NL4S+tNTthyJ1P2Kaf/jyfCn3xY9m3D9rBtwupwMokisz+YqfRi/90t/8SR90jWDvjyYSDeyLvlT
r4r4A6n7kppIjv28gIdr/cLewj/ENXy02lKfsJvWYj9oOXCgncTuwuPzhFmgQMD7tg8iLXxv8dcu
LFvlhDPhyDkFlUrAQWoMujxvRBf++PMf1IIPlEzXraqp37u8oebGvmbz5x6VkGltTa2Fh/PTaj+o
96Yxqa0dE5srQvmFQ73aYzRLu2ziU3oGD0V+IVYMcbYm87eAe3Dn2L4emp49rC0DxZnYePN+2Hye
9rSFQ2Nt0Wu3fgO2jssDlGsQ/3hD9vuuW5J/onvu9/3LVQIEriJ+E1hdlGS6zloJM604YVc6R3zp
5xxBEjRf6qfoASac0gP48U5r25cJ78T14cdDFz+2ylj/xppWfpQvDqbUqb/xcHfAwm+wC9trNDIW
Gb/x0+hUAnvhJ62+PlmI+nS6J8y13wJw3w9/WU+fbOFv3x+PoeEDbuzRDE4iXHg7kh7CuydAYS9I
k8NuWQ9zQs/1IdW51AVIGKQNWCdeSKAl+A8kXt60p99ma4Fh0DOMn+o5m41NedDDmLs0ODRVL10z
9QPryNtj91TpPSu/t1Bb+AdS3uYTfBR/IKC57xX6p58npS6DH/s1Yke45dHkH1MBat9zTO3t/mVT
cVzl/+YxLHSSWXT3OVQ27hHjR0CyGQnKBD3ptSKc8DNguRv4v/pD74+itnkpuiGg2jkik4s6zrw8
dOESr0Ruj0kyrqsuhPtTLv7pB5Kmml+6nQVHUqZGmLBCc25QTeKIFqXoJLMUYgu8ttHwh5+w0+n2
hegYlNRRe6MWB1C46o9HO216jYYHun1hqgYmTRGhoGt9PYSIBivy61dM5uM8wG2/wRQVL7MWnZrJ
8Ncf88D9aX8vXWeBxQ9iV7lUYHqwZKeFD/lC2uFsZrKQrjSwnXyZWqfhyakilB/dnUuMLVM9RcVT
y5m29IOwZYVO1O3rQIQXZJ0W3uJxVrDIhSImGNFHQBJ+7QcHaqdD+6ffwQ+r4PyLH2z4wszJUyjL
f/OM9vvtyVLf/qyXepl/vvR7VPw9+0t96expBrIIowEckHCsOKD4PLSwgKNFJlq9wbQ26pu+5Ed6
iEJSTyLEMSxnRSL6yqyW/m54hMYA9ji0FAHMVXox4HMwTPrjYwyktIK2F36xueRHkp5qC/ye/7D7
Cv34y29SwR+E9iHlJPBjFz6E+Y6Reh5qIh33CAgzacn+1So90beBBR8uC/CmFA7ZzJB1g0aqvPHx
4Udg0UM7ffk/BLfnIWPyK5PVxU9RLAxjPYcsOf74GrYlcQRsp7Dql//+9GP+9GcOq49LvaxjYBbW
uwN8btQH3ZzzlT2N+FXBCGt3vOgBzk9gA2FTXkIceh23J3XXWnCqLiWS3PidMHP9+cIfD9BPo5OJ
q9OtBE14DMlkEqeX8aQiKNy+XyS8k3vU78O3BnfGxadGVjqAaP6ggIU/oq44SfaMzsMNPgfLpIaL
AsD9QGDg11/96ddfvw/+/dsV8J//+Ouv//PbYdB+7kWzbAwYi3n8539tFfjn7X77pyjK/6Tyn50I
ZLiVxd//+vcmhL+7/tN24/8dP6/iPfz9r7+UP7sN/h4/4635b4f/sdzrP//x/wAAAP//AwC2t0TP
4SAAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7c0b9867da38f9cc-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 01 May 2023 22:52:31 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '20'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 25535161a3c6c4c4d2d95830fc85649a
status:
code: 200
message: OK
- request:
body: '{"input": [[18031, 1495, 220, 16]], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '99'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6SxOyOrfm/PsVu/aU/kpFJIs9AwG5miB4o6urSwQREJFLAuTU+e9d+p463T1x
ALGiycpzW/mPf/31199NUmb34e9//vr7VfTD3//j+yy9Dbe///nrf/7rr7/++us/fp//38isTrI0
Ld75b/jvZfFOs+nvf/5a/veT/zvon7/+NuPiQ5nS1Anz5acM4uJ1ZNpa6bouyZMjLJdQMZs0H071
gcrA3keVHUtt4GxxmpbKpd8Q5urcTppOaEXZRcct8U/hJ+LRfchQhzZv4iqmnoz4sN3CaV17ePPq
P15npqcc9Pf9yLCTWmi8r4mBLoLgMxWNqKR6tKTwcM83rLRb8OZ4jLcgX4URN/ux8KgSvxtUsFJl
ji1XiBfJs1dO/JwwcyO5+oS8tQGQGluyS6ibDOB1N3AIDfAQNarXy5f4iE5iTpgpd5bOk8GtoPH0
HsOptJOxux9cqNitYI6nd2WR7zcusGJ7JGpq5px/KtyjfIMRUzeG7bHJkVto/O1AVxpqk1m0BRFN
SbH/vdeHi6MBXGZXI/jlC5wv2PWIOOlvTDvOQTJpbiSjOiUWBn67JNy/Xn3wyyUi2HaSjkVXVwX5
yBFTYytLhsxY9LBYXp/EROPEh9rYuqjFqkWiNiiSWeR2C/JN7Yl/Pr8iflyNAbCk7Yg1r4/JpO0M
G8xTsGGOfa35tHx1Mypq4cX2/uB59LA73SC1IoMR65B3w1IPd0qRVwqx/fuKT055zWVf2nV487pf
+fRQ7Roqeq6JqepFN54Wnx088MckesuJ3t+PqyvE75tLZ3o8JPMhMlr4iF2GZWU36/w45bmSc/HI
dLy0+GQ3VoMEB9XM552YTNkhCiFU45KiTaXpq9qOJbg894jo7eGgz6zoCpgyqhHttEbelK2XPRQv
pWLO+/XoRr69jhBErUy2w1R11K1iEbQNbYlNLi3nPpF6KNr9kaloj/mYvhoK8Ts1iZvG124+NMxA
S6FGeNwYjc6mdyOB3jIdo4Sq5ci3wQzWLgqZKgPhc56QEbFzLBNVjktv1q39HQQtCEhc58vvfCOF
o9V/8GrLRG/EdZWBtspkorfhAk3Ds/VBavSQOc6ClJOmNT3o7+xIdPKu0LiT9mcYqLRlD+uglnxx
WI0gLsUdMbfUSqbMWFMQUfDBrWaXXcv2xxEuihQwa65PUecf7zbo/RIRuz28k2lJ9XAhn0WJqcl0
jJpTezuDoF08st9Pli6K2b4CRweN2K3aJDRcbRuI5lRiu+tcd62elwKasl5j3/pG7RvWNdSZb7Mt
fY1Rfw9e7YabqoXFSyZ7rHhMdzADATOrZ/eo74bABr2bF7jE4Qf1lXWtwdkqKv2eRj7nPZ5huoQp
3by8mI+ysLuDYFaIHA2ZdsxE3oyGz+fAtFVYc25eZwOma3JkhlUZ3YgRwihXypoZxUg5m86rCriG
MDG37Oy1b+loo/ipFsQx4lXZiuvgKhedfyJeLIlRf0rFUZaKpiPGM4m42BQTVRabycYoyYJo5NOB
/v4PcR8z6bgvhi749InpSp/LaKoX8xHBZT4Tc/Pu9LES4wJC9dwQuzvPel9ZCwEtlrcn80f/xbn0
DnuwrBMmuNq9kikw1ZtS9ZgSo3rb+ojjZwHdOtdYGNmWN873cwPB4aUwL/G3nZhnkg2N2ZlULoYK
fUBUJZCjzsALYSGgQWu2LgzDAqhQzxKnsRbsAMKsJbozyeWc7ycXILUV4i7GRufF9ixDcmcYr4Ps
HU1ZPM8/fGV7Zn6i+Q2LCqzj7sFIwaSOfZS4gGY3AXMVKU9md3J24EK5ZNvDAXs0h3knT+mg4sXz
mnv9/Vk1Pzwi1iy+SiqkwRZEFH6IK9wT1M+fvQyOc26Yux2R1/zqoz6PFcPv6ovfnlEp/ttsmRXd
PL3vXv4ZhdbKomsehwmT4HMHTRFuzDEQ72YRoSUSsHOmrMjijn+UAW+++8H2YWR6IjvlNTQ7puFl
zVSPhviDoajhxbCzeKFRXtUAoowHLO1x5VF309TQaOcb28X6q+PJZ1dAEloiXqFHyHlSdTtUPJsN
nkRJRZ+l9RTBneaantTjs5ycMijAIujD9M+iTJh0kEK4CODjObotEY2VRJCDu3tjhhWZnRhebCo7
6rzHyzL0EV+wdY9++OBPgcbnHPtLcDe+S2JPtEs+mmK1CaP8Tqx+LyQ0dz5XFHf9iuz7riypgoYW
tNHBxLs5p6TvPmxEl0+bM3OzufJZSbsePbaSSHR3JZaTrYU5DP3KJMbr9vL4wjmHSBNzkcXVxtep
aIsimPfpitf0dI64eSyO8JkuM3OzRdjNwlHK5STOHsRVmlGflVrH8P0+Mcpli9o8vNfyZ/Uu6Glj
7vUex88c9I4azL8kozeHF7VHx70VEPyObH391SOQC++CrjYXmXfSPi9Q8hAt4s9rt6wF2wkgCWBB
jAKPnPvHowvxKwjJtbHVkhejlAHs9xIhz2uuz65wodDo95Bg761EQ/2UrsCSpvutVzc4t0kF8+hu
KRLte0lF1Li/98xUX3k5yo9RBH+tMlz0fhxN2UnsYbrdDySklfadb8zgNOHyWz+G/hnmjw9SZ3vM
/uy9iPu6k/3wll0/+y5hIz7vIK4OB7oOG+AzO1sCTJHVEFzPV9RF4qWBKr8/sBifomjOHcVA2ux7
zLxXbzSzQ5WDX6xlvDAdu0uHNMxA3NgTO4ufUzmzMwGkNy+PuYqc8p7vmkJ2hWpippycEfc/rQSO
fXl/+dLlfKEKM2LpOqPyFqceP8qqCrgPJOKPiwKNcpSGsHKDgKS+N3j88yhDkLPgTgda3fQ5ViIB
vvqRuIVX82bHXiKS6FXGY2xl0TQcZUCwJxJVosvTew16Hij44y2wEm5rfVhelwVoCtyY8+odfT7k
VqX85ntYIYt6y5Yo4qaj0GpHnG64dEmLHt7jwH71Ml26qIVsvX8y7Xsee2s9NXApA4lop1OqcylD
GIYqaJlh6auE6hdpRkt46My7lkn0Pd85uOtdSHaryuXf+rlDa2Qdc9y573pLrSSAm+BSCWuat/6U
fAuhvdHxeNcq3ortx4bUfXLm3WCORouFLqTODegkmjqfbHzK4LHPMUZXLY34cbiOkDyWFvEvt2fU
p8dnj4qWHOnzIhgRT+5e/dOfzBNbuexlPC3Rg7S7P/px/vIVGtomJLYvRGh2XX6EBVogpjdtk/Ai
cjGqgvLG7G6ToSlQxxbqu+cxvbHdZLIVl8KXf+hnV9yT8b6oa3C0jUGMZx+iL56KKCRhR6xg30R8
Go8GxHXsMFdXu4QVl1gAwZ4ZXj77GbWh0rpwNKuJbJmdelOw5zUAOY3Ev7gHfdaHCIOZtE/K2zmP
ZjHdSCh+XfZUXjw+Ho21coYpkFdU1lqtY8U2k+WhOr2Y4wg5n4Y4u8n652Ox3fmWebzFQQWPrSxi
MTaW0TTQl/tbH5qjMSmnpR4aKN/4CE9rF5fjPRgaOUkpwYuyWHX9V8+idN/0xLDEORq0amfAdPaf
uIvX725ava4GXMpQYubm7ekzy9gd4vqI2fc8cB4NfQb6p/bp0vSGkiqL1Q0sO3EpHKebzhedXYPg
CSXTRrLpxmpOM5SNqw2d2XUfTTVqayQf7gfmCkrnzfn52sIHKS5RkZ973/WRwXqIlHm32zGZw1AC
tMK5yqzzKY/4Fw9BTrwSr0efdLxorrAR5x4T936CbsS24KPlYpiJTTYNnxkvCtTsjQ9JVscAfSCX
baS3rU1s0jh8GFqnRzGVPsyTJsNbT4p+Br6VCEZJ+uxoeLH7P/rKseprMv3wrd3VJd7U2NLHzrvZ
qGAv9PVbTVL9+Lwxcky0dbLv5t/5CPcuZ+5itD2qy5kN3So1CBGE0euKle0jTR5rYoVm4M1CrAZQ
ZxeBCk76RqMFzg2ksn8zxzJuyTQsrnfw11tGrOnZltyY1wDhdWkz/3LTIl5MlyX68gtumhuN6rzf
zUhvioC+pNUdfWBY7aAo5wlPK81Cs6h6LdTH4Y37+Yyi/ucXpvh2wtxWKPrjh8w7v9I5SLcJX3Rq
pfz8sB95C8SK3LTBfzKdblmLvO6zM0awrmfCrKhrvTnWAkMxb9WLOK9hjYbM5uMPr4n79UujZY89
nKYoY9/zo3dmqi3htHyEXz6Cbqqf0g26Tb9kN2UX6lPNPQNJHzViu1tMkqkufEOuj23P9HaZ8unB
4xDkxCm//m1bjph5gLhznIntK6beV9cyRNVV27D9F48nW9n2imWSMxWTtIumi2nK6Ov3qKwhtRsr
Lz9Dks0eRdeiL/lU9iIMdZoyN0ViR396MTV3B/xcJ/uS366kQLg1TKZx4ZbMYvt0Qb7bFdnj18w/
l+VuifTX8Y7X3DK6b/3RP/ip3pdPNNS16oIZEWCm1t08LpmOij6Kvvvhi859/WTDgsc5IxB10aRt
NFvh6jpkHhrenEvn9xXgivZYweIBzYecVJBLhsFUGT31mdUoAItsPsw/Ld/JzBjN5K9/pfzDuN7f
bWwjcVHEVIzrORpq/9lA8WRLvFhMe33EgGXQm7f31VerpNV3mxmmSB2Iq7y1RHybDxfVp76hMJ/8
jhfbTNqw0ghYXI8vb7qcNhiEvfogptbJ+lCncNw0ZuV++StH7Q9PvviDV9u9pLPPUt6Cu/b2+LWl
74j+6hUX7E5X8s3Wx1k2JJQkXciscCdwKp5uN8iVZ822Ufvy5jgU7jA8DZGosl15Q5ZlGPT6dqUK
1fbJKts/MEot32HYMxdRYxWtBJemrohhXJ6o5/mth6PFK6p88kyfLmqbweUzJGy3vofdeF9bOyQY
mYFlRVZ4g+NPAUNurhiuA5lPMG1scMjkUSYoWiTqVbNF4vg5sH1YfX9P3apScFOLn5/Ue9ktDBjy
rUynMZc6vnDcClhoERxszic0DcdZAMHsRIwkUutD0D2XcHkXDdutIo83cp/vkHlJe6YTs+bcX5hn
eSG1B+aJ1sC56We5nM+rCEs4zL3RAu0KwX1/+sNvLTtbAHVYLJmaXnA3u82zBr3EFV3W2dObanVn
IMfCI7M/49Wb2QlCJD0rRrYDqcvZXV1FENfbDSOCEHh8VC4qsDo0CDG7uuMmtZeoTl2fOdWO6WPq
uz7EpXv74mPjzQzSHvyXwIlRPAjinz7MlWFQgDQf5iTc/GwM9OUbslu94nJaXvMlXMoLx3BRsE71
j35HjRafiJpeaMeKywGAb2VCBe+Yo/qw3TfQLRObDl89N7vKeFa6zcSYtipNtMQGvcvizB3K1Mst
6Xe3yw6q++FDVNmZOTNF2P74A8fe61NOy7tU/fI45hbeDo3dVbv+yau0i2AkortbBOsvfuKFIX3K
H38r4lidibo5WfqErvUZjo74pCv0mBErdqEAHSjlHz081CrewXLBZppGu8LrO+/mAt/ZAtE/fh19
/awE7fbMCbFKnFB9NfdKMZz3+Hnxz96k7S5LOI32m2lHvtAnyGcX8rUjkN95o2ES3OHhBBlLadkl
s1hoLcix1jFTPxy9wTl/QlnqLwpd05MY9fj0yoBVrs+sAK91JmFLQl8/h8XlNPAJdE+GKoufRG+f
WjIEN6GAfCX4THcIdHxUHqqsTTbB0rd+ukU9ir88gQqO3yDu66krL3iS0wAxn8/NW8sgicsbvkTK
mvP2PBqyHFxPX30Rd3zC26uymPyBmAhHnEvqJkSP/duh4s3Syok46xyS2+v81Sv7jpuv1Rb0blzQ
7nZbRuyjKVuEh5gx/BqwPtWUnwF/LIP5p+qiDwM93JHU7XdY8IygG090NUMSSAaxmyT1xnTuvvnk
tmc7cWcj/vMvjEkJOyzHhlMlWN7l1szfxPb7nT7+8peYwYuYGU0jtuCNIbc2TfDGfQ5lvzusK0ju
TcT8izvp7CjbW/TNa+gru7NuGixJRa0veMyKHN+bLoaMQc7CO9vFYlEO2anbgrMj7JsvbX75ZgGO
urb/jBf1ive//ITOdJV3PLmgJZKeNSPGNx/48cWmqihh/sRxt4F7e0PDsNHwdNrmHdXzEqDOsM3M
dPfi07CoXCQY+YJsWZvocy61ZyRsE4uZmbDvZn0qBDiBMNPkehjKnguqoCzE1xHTmvQ6Ty6NDR1C
b2Ju0yaZwya5gv7c9RR996e39ncXnTazgKXufdDFePOQZZGPLtstn0rHzIAGv/XBEj4q5Wug8R2G
5nWiDCo34v6RiWgKpBXTjqT85gdCBc6+upLIdg98VuJIAKnRQrbvHbOctJBfUdG6MXO+53tMXzmF
bvVcEH9aKsmIMwOgqtGWKrTsojHVix5S5wq4XeRrNNTrcInYrWZEO72GhE0SqiHIgpAY+U7iQx1f
ZXD8/M5cPYy98bffjmGumTatonL+5rmoPrI32++DNR9q/9Oi4LQaMJy2sTcNiyBTjsZr/OVv3rCc
614W1ANhxlOZ+DRYowoPy62/+PLWabzxfWD3qaCyZusd+1S7HjV7p6J5esGlqK9kCvl6azOiXM/e
mCLPRt/1JbrXRH/8FwSnecbfPAK9Lk7aoHZXlWy30pNuWraWjz4LVyDemooljc9y/sMH+pKijM86
iUP0WZANwy8v8L79hCXIZe3gEbGeM+mBpT9+qoWt+NW/doDM+FXhMW325RK34Etc22B6NNMkGqtZ
G8FKcp1Zh6JMVjUqakjVzZ14sbmJRvlyOKJU14/E9oclGl5ICKDRkhMum26tD/YZBFS9QWD2fhrR
aDn+7ZcvEuwZY0e/+TZydEFjpj7rSf37P/izdZh1KEN9WOrDiHA1xkxvkJWMeN3doXjJH+bPxlBO
mtJXKE5RQyd+h4jqpBdkM10fib1/6N6PL5D8OF8xzMa+W5tUFSG4SC3NbJA6tnC29Z985scPc+xm
AnzzXCwvcOuNaRCDbEbGjtyE3vcmzU1kYHHB2TZ4LPjnh1/C3rzRBWz9aKyCxx1+ea13PezLOW5M
H5IH2jIi3GdvGmydwkli2z/1w4tKw8CYnBAC2z6Z6rar5aHEFIvrvuBULLQGzPN5Rb55hcd/+/fr
T7h3HSMq6FOh1OGLMVc9r6I+FfkVllxqWOath26Ol5ABy4UrM+9bB/W4IxLgonli3oYPzvwjWyLz
iJWff+d9mmq+chr3BV5889hpeYUCkmy1I/Zn30UDyNVRyvmGEFXiH692ib5TOOEn5t3iVyfqpAf4
5d3qvSzQF5+O6Mv3xJS0yXtdiqUL4WX2f3lqOSyvjYi++gA3jYK8lqmxiE5SazHtOD3LIXi/RoBj
l/7R780JvVTQ6zQiqhRuEHUV6Qj5xlXpOjwzj+qkAri83yXTRLPkc6M2qtytuzUVvnzDpTO7gT81
NsPuXHDmz+cd+GPlU+rpT8QWEAVKt7o5dJQKV+dRrmD07Rcx4wW6zj8l2qJsc4kpXIaUT867Cf/k
Yf7Xz021/2lkRx333/oOuvaX/6ZbiNnRQGnSjTjbQeOrA62+/DjreRDAdDQ+jCywGolhpbcQXkYf
D8Ne69hxNYagjbsIr9Jr1PHjSgoguNs3jMRW7vj0cG4/vfjD53IY7McV4ue2IPs9cTzacKlHSaii
7/55SS/vbkeIO7oiKsJpyRdvo4DHzmiJxdEYcfOlqBBcFi/m1GTFe5mYBRQdPpFfPv315xRVzzwh
JF+PnBWXVwOCI3aUN6399cvB/ecfifnNU2elpc2PH/H0zccG+BxakLPThRGl2SWzfhlnpXGkFUv3
01sfZ+/RIDlIAyp/83++cLIAQt+byZ5d1t4vfwdrP03MMHZbPmTFIADfLlyyjZwVGpaWUmxaHzxy
4ShIeOL51fq0KjFthXvCp0tpiZBi+u2fnMqIRVsewLcfTB+WNib9LPsydKN3xcu8OaM5D+8Vqh/H
BTl885XhmwcoR3udY3m7p8msxKxFzd6rqODyqZxscwXQLbY5Xk/zp5sCpxTh+/6Pv2kq+Rmib/+T
bXt/k8xMMoVfv5D96bf+8tpv/kqMcnsv52+/GnD+qsg26jgfU6S7v7wQr7n8LLnvNQJ895fOga8n
4q+efDgmTP/4u2QthZ4AD02U8Rw8HnwIVKkF6yC8mauedt3MHHOLqt6n/6Vflq2FIYi1J9O//WD+
6WuMgsTI2DagWjJibm/h79+tgP/8119//a/fDYO6SbPX92LAkE3Dv//7qsC/b+nt38ul+G8m/rmJ
QPtbnv39z39dQvj70zX1Z/jfQ1Nl7/7vf/6S/tw2+Htohtvr/3n8r+9c//mv/wMAAP//AwCkxTP2
4SAAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7c0bc85bcee4253c-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 01 May 2023 23:25:16 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '1067'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 482e97acc620bbf5e2f7d3dd44145666
status:
code: 200
message: OK
version: 1


@@ -1,843 +0,0 @@
interactions:
- request:
body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": Result:
None Human Feedback: Command Result: Important Information. "], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '207'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciIjVZd5xEjlUIHrDT6QAqCiJyqAJqZ3/3jv53dnff
eIEkj0/VrDnGb8z6j3/99dffTVbe8uHvf/76+/Xsh7//x/fZNR3Sv//563/+66+//vrrP36f/9+b
tzq7Xa/Pd/F7/ffl8329TX//85fw30/+70v//PX3XQk9tt8IacmL0+aAZtXYYkmRk3i0zh8b5pFi
rLzv0M2o1HUIhpPLtvVdK+e2udog7hYz05U79abP/jiDvFzb9HMyBc7pcmfBlXkJ0Vv87niwkiU4
sljB4kI/loM4RSoURpsz1eO6N+lmkEMxVCa7P9Ys7oPZj5AyWQMhDrfQQB6Ers7xVmJeLXv8Izjl
E26iPzMzyWyP98HTVF5ayfH4Kp98xAsYAeWwIWatnrJ5EK8YqgWVCZG1jo/vYRChR2nAgtXpbUx7
bwyVLb+PZJsJy4zp86VBhbH3yfaYXbJpV8Qq7Be7kezyzs9mGaQczmTDSGAeC29azBYFv241gsW8
j7m4fkYwlVHFvLVUGO0qnhMIzdbCRbQ5Zf0wfQC87Z7RJc+vXT/OU6Gc7oqFY/DWZUM9TwVnf13h
9TVJ47Gzb7dNhIWQaRtB7vpYtBOIM6nFoF1xN6V+BYi8n0eiHac3mjr5ESmhZlH83jQu7ze8xqhC
3pO4Ual6DKQ+gfV+ZeJNuhb5TMkiRNdOCZnxzE5ojjenBgYtw8x5OrHXH/RxAXFWplh5+G45Tout
DmtPNIidLFjZbxG2ACo9xhJ6196YNCsdxN3ryez+2Br8GIcmNEWUMHN3XnZzliQiKMa7x+Mabw2q
nHwTmLdoWEDNlk8qoQmUu3NDRzWLvPnOnRQyCgHx8qPCRznYhZKUSFdiBo7H5937LYEh7q7EFgXT
m7faKYFMck50SXWtm6ISQpBjsyFX97nwhnIvNbB5zoxCkEt8ekmujVbCJf1Tn/N9OUkomSKd7DhE
3TiMJsjyqVWJk5fE43Oo+cru3chk+47Kcl7blxzMMDkx6+IXfDxgM4d4teVE448mG2+F3YC07Y9k
L6t6KbJ7M26uy6Ihunpp0LRUD5HC9ssPcb19iWb/eirQK2lKLN7mCs3f9UG4EHzmvjSz46K4jSBR
wWFXE2TjI702Jlpp0YdsTweW9fXxISpSb77ILSjKbl5bkIOi2SZ9HR9DPNloryo+7CeiOfWqm9hT
T5fFSajYSdPNbLx91q68iS53Ym4Vo5wUT8GgHE4+2e5EF/F3ieFXfwxb+jL+7Qe6zscHIw/3kQ1a
76RwiCqBLqJg330eGdTwnE4Ncbj8yfhhq1eg6lbCzFoV4748pBGIWeGySHnpxqxrnYDSlvXMX+/X
JTdHhuFRPijTrkZlcMEsIphk3SQnxIxsbj+yC3f/reOxdgHRxVOo5cSYNPZnv7uNGcLBW8dMy9e5
N3vSB2B/e56Zvz57HQOpSsGqng7D7Zkan6t/fsLFFVOmfXQcN3n3qSB/7Xd4JV9Pxpp/nlg2NvGL
Hr/rzbREakGzsz2xz4mXTdXVdFHa3XxmJxIqJ8uS1d//R9Q2HPm8302t4vs0Ia4nXflILrMIV25p
9IMXRTcDk5+yD/FE9CllnO2J9YTrEUKSCS8az7c6keF4yrdUejkaEo+HvAdotZjpneZ006JGGLgh
nyjSu8n7ra9c53ZF8OWy4TQrqxMsD/Ibr/Sw5vNLFUz0eFgW0/TikfXO843hDZnF9LW141O38UN0
/5xLojXHIm6bVWyhzcZeUnGhr0oWZWsVjv11y/RLGGd8UUQLNG5FGUtdzhG7Hh8jpNNOYf5BexvM
LBf9r17oWE5iyTxRPUCqOQcq+Zt9NkjnsoaG70JiNDtm0FUsJyATZ8lU8ci8T3V72Irz0gKmR6bn
cZfjA6hUkJielK+sv8ahiyS9CEh+0N7eVMeqD/tHX7DcOlUdl+6hAOMMHY4bQfG49nrY6Lt/zJEv
RjdLjzmHcFhhRoRsx+dV0UpIafucuf7a9sa8e1SKSw5buhzJEk2fGw4Ryt8fhi+vB5/PSoGVJfYs
gi39Hs9jK0RwiMAk3lNbIRoMmg6Nzs90dcJZPMessZFjRz1z70SKB+IWVKFZw4iDpb6bhW0v/eqJ
WEXieKJFShmtvZwTo9KQ90n9agEf11fxymQPj3/rE+Y4CIiWu9jg76oE5RwLCYkbeW8MO1oUgIoi
Yno0XYzxLvkNIor8YtqjvXnczy8SqOYmJLt6cYtnXSsFVO0hJffTKzb6d6EcYP7AQJKXyNAc9qiW
o6XrMH0cNETvy0lW1KwdiX6uMm80S9cE+gpstm9In7HjHlmoGxYlM0XP7phZihRexzwjfj9fu0nd
GylsjXKHEd2cDL56r0fYHQSdBfml6egt8dK1OhsaUWsX+DQw+4Q+19Qm20Z681G+LEcYL4ZOPCyW
WfMuVgd4yM2NELcQ+Bi84hk2UXYnQVZEcXPWuyccfFxi/j74nAYt0cFf3jdEy5XR+9hWVSnX/lDQ
8eZF3exfb09UvZcpPurKAY3zi9eQdiaw7WZsjAElyEWiFHWMUP3RDd0WBKjsRCMJe+4QOxhDtVnc
/CuJFlFofKrm3MBT/dyJ/YESTRv2roGvtR0VNPfR/c4vcqqPwsxyMI2RVb0O7OhMxHscdrwXgtYG
KXEUYhXR0eAD5xUapvMaC9JNRr1qQQHn+hOS3ddPTL/+Vu6oRDfx9CxnJwpvymPnXolDD6+YYqs1
Qbs/Nbbb4iufPtE8y/aj9DGKP694jEU7Re05Sonz1j9GR66fG+TocyYB5FlW3Qq1hVLSJ+LHes6n
73psvv2L3aqpNXrFME5wCVcZXeqZWDK6Supff6BFUSWch40AcKlm4+c3u24SCxs2SFkztzz2MYtF
RYf5NL4YEY4xYpf12lbOBDHiPMnbG8JK69E5bEP261fUuZYLxfG2FTPXXVKOWygtxdt4Kl1994t2
4rP5+QWm3ry5nEmc1vCpX3sWOKgy5tSmB4Ty14cF9YzLyYq2TzSr6ZFoprov5zVqZAT+5sn8hB4N
plgbADXbF3SeihLxxlUK2Y/YheyEVYNGYZlX0O0Cg93UxojXsN4lkOJTxrbN9oU+/f0oopWQpVhW
toPRf1xbgvb0kJnaORYa8tV4g5W5fTDbX25jQVjmNYgVi+n01Td6S4wUZK7tWWx2Yta35UMC7Fod
hfNqgUYnGSmUiWVTJeVmRpdwrJG7jCjD1dR6f/z+7dmlTD9XyJvWfmJBVy4r5ji+0HH/cpegvG1t
Kn79AttvQ105Facz0e2EG93HjwrYqSjF3SoZMz7nGyp7eXonznITd7PixLViUxrj6WpUHrXyQwKr
KYgYeShl3F/33JW/vECnr/7+9gvEvYfw4ls/LBCuEnLssCdGExjGdPaOKZpCKyeutLwYbMs/GO5k
cvFkyEo23iWzhQoVM1ZWp50xW/khRetjmtMuPPFyymcxR6HZWMyhaBnTtb2/wcJstvTIvQ0a64gJ
sI78AyPLweCrRer2P/9F58vpnU3v/oOBtPTKXE9S+Pxc9jny7PWO8jeWeOuufYAyfYzMaZ6bbGRV
pSLJXK6ofHu2cS82hb0Z2fhkd1nrEL/aUwThbkd/fsUYV2nayqupen77idYNxG0oJIIls2C5enhD
t7uIqH56L+afVzc+mEMIimEVJsmrzS6eh0/Vg0Q3129/WHlcjiJJxuVewMq7M7u16lg2vGE8kUB2
HGO6C1iFHukK2ebqu+sd3/YhXokx0Q/na8ytyzzLS1yccBd/XlmfRmoF3DousBy0rdHrFjPRx8Uq
+fHaMAzd6Y9+kbR7G2yslfaP3uKiO3lN+zRlEG99RIVaFTMmm0ECWE4qdupuijHkSwrwWqYGTSsU
dnytK4L89StYLvS0m8iD9OiwOR2I+6JzOQme+lQe4bGm8vbloTHdJia869T6Lz1TXKmCquIR3iw3
cTm1gzQij5EzHheHTTYuxeCEzmETMv27v199bNHjWgyUZ4R2zbc+IbxeF3h6jLNBrb0qwbc/EL/z
N9noznuq+J/bhu7yV5LNi8e9ApX5NjlmmhJPZ//rZxQ9JNbL84zZMGwBDQsJM+9IPuU0yjgBZ7fJ
8VIWAj4UhlRDnPRP/JEvZTnexXeL8LUxWJaiRzmFEPTy5k5O5NufumkzPyo4x3f7x3OcBuLyCev9
2mT+LQ2MSeudBD0HTljQ3Cy+0u6JhNwjPRHiqBUffnyNCkcnPz/cB7U0A7bKGCvLz8WYbdMF0Hvi
Md9nLZ8OXiugL6/h2esqNF24J0H3cHssn0+6sVpVj6eSDOcL0wZUZVTFnxmux7dKjEcjoBHl0whL
7FhEn3cKmtnNm5H66CZi7s73kkfN+wm//rTLtrY3qflNQubndSPOpta68dcf0WWeKLBEy6ZF6lKY
nuiAl6G1MXrUBCESKleit1jlJY3iwIZWlV367hQ75mZqY7gm3P7jrwTHt/GfftHJ20U2xa99KL86
sWM757BHk3/oW7SpDzuSFO4aNdfw5qJqcW6JecGtMe/ajwTgDCldCYplTI/rpYb5lB2J70HT9S+w
Z8hvskiMcDx0k+9cXFiidocXEruVX162QEObiKVBdfovXm0PVkTsgxd0Y+egGxQnsWJ29HIy/nj6
ItyXAyfq64r51Jln8c96BlltxeNL7kZ0ZHsFiy9p4w3Y8egfvkZGsvRaqbjegK7qhO3cHee9n3Qh
LLtwTTCCPBvv/eQqoFQdFW67wpu+5xlwjkq2C2tsTJ15F0Am0p3pU1Hy5hqHtvLjqa+/K8fstrHg
y6+YNzvi8bC9gDS0Sv3l9cqbBJAXYF3HhumLaPSolkUR6FHL6QPfu24owocJK5pcqCiKQ8aVbQ7o
2p8K4ntOXQ4HdysCf+g7hiW76vicHXo5j3JCiCwEiF+o3cLxEiokcHRsTHhHeziJ9M2cpbUvx9d2
fUJmmJ4YOfZXg9/lW4ic/X3FjOOnLefj4UDRr7/RH08HXaXCXrqpzJrcY8zLRJWU2/26Z66yKL2f
Psrbs3n/w4PC7Wq1m+2wu9P38qBz3gBbwHqxEOgo0VXc9KcOYBlwkcpfvpix9bSg3B0b8tX3jKMr
TUEZpIrF3/PySS6tBJ/NdGE7muvdKrtcc5Bm0SJWeY+QsEqUBkIhuBOKALL2x4dfvSee/lI82gRn
Wz7Uro8Xl9cDMfUcN2irHpaM6I+2Y8kjseH7e9nXX6O+6k8hfPmESsmCdbOVrlMYut5i+LxxOkFq
UooO96Sjs53EHvd8lqKvPpIjWR09LrbTDHM/vbBcHv2sPR43PdK7NWLagle/PCOEQ237NFtOYsYI
iRcQprGIPyfzwL95kgrokj2wcDgK3tOPG1X+5hEYoiNGfXmIQpjuZ4QfsvrsGPWWLVwvdsIMudtl
nXG2MFzcu4tlsbe4wN9hpXS7OsbfftB1YjuN8M1bmC+5AZpT1ziAcpqEP35zbsTRgtHQdaKZnHcf
KscCNBd5R7/+zBtBUGbEBUyo3D4O5eAhr5apNBf4/uXDyjr5Ljw1lbHwVepcyG4bE4y8NAm+JQYX
O7Ft5T7Y+Gwn6DXvjdOlRtuzdWfax9Yz0Ql5/uM33LNlFo/e0vXRfJpfTBflJepfLIxgp3xkhl+V
b8xDp/WQN8L9myacDK4vXRe+eS8J3q+w+/JKpQjvo8rMeSuUvE+jGibf41S2ogsaut1egK3gh3SM
lm32cg6yBd98FJe7RWN8fnmZ9jzJFK2lwuvly3pGfh1NLHjf824mcVTDj7/T3RahYdfRAl2qhBNr
Cj8ZX+V9BNJ8E5mavGhHl5Nuoud7JMz4vJfGZFmz/uN9XH7eS29+rYoC6tdxoD89WX/zN8DiymdY
PEk/v2jD+1Nhsv/YejxbpJPRKZ8XmOfI+uPf0Fe/yI83RUBYRcdPE7DcA7ucpItao0EolxRu6WCM
Yd0dYNuGb/whHe3o2U1mKPjiSAWpwkhYpWmDog40Yi1sp2PBSpZBJvKdKo+l36F31QGMF00nVn/d
Zf18piYkQu4yS4UxY57/TpEyyNU3v3nw1omSm7xrdJc57rbtJsGzn0jdhgUjsjAgqmpTBNg1OypL
UWeMn2P+zTefNVHPUpFNzkE2YSG/KCG8eZXsl6/tUSjRaRUUf/In5DbyjRlNUHrzOiILFN+GJ/GD
Z8/n5KyFymUNHtvSW8fnfl/0yNjsX0w/KZqx+uotfOrFg/iKUfIxyf0n0pdZ/Du/5eA/ri5aHUSN
qBI9ZnMwOPqPf5gu9b3H/RQk+R4pMdOtrcon7Hg92tyDE14+FCMWa6LVEPQLmX55lU/WbaHDdfX4
YIgyHc14Mz1hfyvOFBRxi9bffgXaxkp/eXg5XPWHuqlXvcKsi9Xz8ZuvozXRPTxf3GVMm+DuguDW
d7JLl4IxDEN5gO95Yv6t0IyVtwMXOlNHxIoei5KmXnOAb35OzC8/D1/9Rs1gEbrM914sLD4HAe1c
+cJ29WIRV6/t+gBpsPTIL7+mXIIDyB/RpihrWMn8FGRkuUFInKYJstE6mS5qVa+lr5w7ZVv4z5uy
fLM73dSwMr5+I1LGPHD/5MnzBSkjSgb6IW4fq0iYXKUCFh02TJ2egzcH4rqQj7o0EB8UwoVjvl+g
5lKsma+ME591AUXw/f14JZetJwKbn4oQTw1Rv/OSD1YkG4XpXqSif1A7xvCmAeNyAlqoD8ujjVo3
KBE385en1Yw3CxNQOKVvXM+GlwnaQ36Cpws73ErLi9f/ztuX13HBwEajn9ohZHQR0IXEFmX/2lgj
WDvDZsHyszF6k8kV2JOc47WTvvisemxGfJ2cyVZwTW/lxFcLnoFYE7X/rL2Z3ZsZoki9kMP9AR73
dEmGItV7upBWc0k39LmAe2saGLpoyPrLWn9C2kQmCUxH8ppfXlPoG2A//8BcoTyBv7xu6Fw1Rrn+
5ZPK2TGJ7V00r9dHVVf0WDeIPh0y9Du/MBwWNds6T8NYBezZoN/8CFt2nM1zMZ4UHG57Yk8Iexy1
Jxm+/p6ch0/RTbI7NNDvt8dvnmWX/WklLxAuO5M5wtCVr2sgPH/zJubvjKj8w5e/+YHbBz4fOzRI
yLnF/Zffq4xKxTVH9EVs5myKoRujyyuBSNtv6NItvmTx9cP68d5hsd/fs/HLm6i/RTn7+ec1w5sW
vvv5Jz8Qg3anwq8eH6b16ubOlDDKnu6JBbzFfP7p93d+hqvicIv5HDoY9jJqiB7sHe+Pn/7qMwuq
aIFo6hUH2I6txLYmexjjRmx8pGbNSKxg9+ymqrs0YDrN6jdfMfqs9SvoA+QzYzPv48mDpoafP8O3
pcJ59nApfOclzE+Ej8FB1tIfj2B00r1y7kOjV65zP9FZuVPj689k5ayMKju8Yjubn0K4AGSkD2bf
2nc558r2pJDDKSDubos4v9qbEH3zTKJ2Ts05XYW18u3/vzwYUVGqZbCuc0Ms9CHxWO4eM9Tb8cDw
2pHjsXP4DR1P1oquY96h/i6yBl5z5ZPd44YzXp5KCYTWkegCUN1NXz5C7rx9M2vL5njce3cJnWMx
IVZA83ICPtm//PpP/Q4uzUwQ3n1IjFiZ42YLnQVjX6cYeq8s+WQkPZCg7pndshdiYXtZyMFlnqmw
O9+733xMth4njWkrLSlHJ5F6UC4K4I9uFRm/M3dEW34diaEnpiGuUSHBd/5K7JcR8Dk59hEs9/2d
5f2WlUMuZDc4QJEzA1/1uDNn3//l5Rg1deUNVvwskIMuV6KfR8MTv/O8zd47ELbLuz4bFzftpIi3
o47F7aqOx3E6POXvvIqEr3PK2+SZj3CyUkoC93nz6LQPRDiVxZ0uLpcLGpf6SoIv/9P5ez7nxflh
wdBRi5k3ARv0PlUp1PYiJ1vnWXp85LWtvEl8xHM0Xbyh2woC2r9ZQZVVGHvz2U1GtFwIhHkdf/DB
3pIa+gZvMcu9dTYt1TyEn17+8qEJK5K7iZ8R+tPfuKfFMvz0fOvkzODtXKnyenkyv37hU/YfV5WV
qKum7zyDGONGPc1IuSyBGTmq0Ri/Zwn2Uq4SbZneuj96uj4aPp3PY+kxK7mOaDhA/csb4vG5yTHo
fZ3he77vYr5V7hSybNyy++l+99jFfPfIyw3CtHfuev2cFpby9+9WwH/+66+//tfvhkHdXG+v78WA
4TYN//7vqwL/Tq/pvwVB/DcT/9xEoH1a3P7+578uIfz96Zr6M/zvoalu7/7vf/4SlT/XDf4emiF9
/b/P//X9a//5r/8DAAD//wMAaBDGhuMgAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7bf024783c8096de-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 28 Apr 2023 14:54:47 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '325'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- f59ce3c6f76fb37599125046aec3d2bb
status:
code: 200
message: OK
- request:
body: '{"input": ["Important Information"], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '100'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciCjVZdwjI2SoEj51OR0QREDlWAbWzv3sH/zu7u2+e
RCB5lJo15viNWf/xr7/++ruK8+ej//ufv/7+ZF3/9/+YryX3/v73P3/9z3/99ddff/3H7+//9+Sz
jJ9Jkn3T3+O/m9k3eY5///OX8N9X/u9D//z1N7ldGbNR1kfT59Co6Mw2K/aiVIi5YisGtOPUMVc/
L9re624DJJoVM7PigS5k25cPVXN7MDsSyvbbeMOAFt0tZSRIHC4+lqELC7w4EyLkQtT7lXUGOVv4
xP6aWj52jb5AVWAsmZY3H0Sr5pvJ3tn4sEc2Bfm0J7cMHo67IOq27vK+rnMT1MzcMUfRtvqg6h8N
neTdjm3dzTmezPutAtE+a8zpTl0+tXmwQGccXom7e9hxzz/GhBJtFxP7GR/ygdlYle8eUunickmi
US+7DkY/14n9El2PVpWkwUtRLlR+oi8atsc7hr47fYjrGEU+7dKwBOXerPD4dMN4bJMaQ4iMB9mb
m1gf40wI0CpbypSHFxSxaJpMFGYhJkTCu0jAbizA/dgTsn1POGdT4z7lNQ1Vtg9jFQ1ws314R+iM
uTUE8/pkmpwddJPpWvlup6phKZhH+0FcY0v4dE2eMvpe7IxoFwYxvSZnGazE3ODNEK50Zp8WGjpM
tkp2+15DvEpQAXW9jZkG6UYf3ru6QivPD/ellBr5NGy3AuyJR+jXMuS2t95+Awd4Jsw5Xl95Hwbd
E+KbOhDte3t443FRl0jp+RsPxKxaOhYhheubMyqHkhEJweK2kXUVUmYa/a4dJyTL0ETai5DqHSD+
3ZtX2DzXIj6v73c+JVluAnMblxk1nWJaDFYJJ1LdmOYPe1R56duACR0CYl2FQqeNaptw2eYRMQPm
eP3tET6gv9CG+dpRi1dppT7A/cCFHCIvb/ssUUxwk8/AtmdZ4s1d+GwA1+Gb7EdRjrg8ltqmzuQD
wevzHo2raAuKAlHI3FAq4u69rBr0FT4dHnl94PxjdqUUjvsvs7SQ5DxwVxu0DvYbgje3jLPLUung
IUU+cfinQM3xbT/lOKlbtr8GKh+Cbz7B5p0hsideFY+Lz2GA18dzsNBFEh9Ph/1Rvt+vI9ubG+SN
py96wuNSMaa3LEV0HW4GWJ7vBXFegeDxvD4WcAl0g+gtU9E0NFEF922ZM+sqGLpw3SkplKtsR0hr
brzWcqJwabx9YNawafVRJ4tSlkf0wTA9gmgKe/cI3mPPiLVqRL1TFqSAOy0NujwsIZqu1DeQpNgx
c8rUb4fruaGwWBFGl1k3oMnU8xT1eBMTs8F2PqRPHKCPVn6Y+0jktutWzhXOn+LK7Msj493KIRNc
hQdnvtMeeZ/trk+IPm+baFKwzId8lYToGB835IyzT9SwvU7RVf4qxDT6Tz6tjyyA62qpsX3XeS23
nChQxhe2cO+Xb70+HXZH8PNWwZNK3Zhd3UxFn1oumJExveW7zgvRtlXfzPhKYTRsW/sMZ+E90pW1
0bjItxlF5095pfLJtHU+7y/42IwRw1/e42F5OjcgnO4ZbSrrHo/nnf6EQBGfeFiIIR9MbROgJk1q
slOrJh8DfnyApDtXKms+4z0JvALlW+dIPC++tUMtpCkI09Wg/C2/9eElr46gYW+PJ2uRtVO0LFXI
/exKM3v34GyuX1gsjQLTy0WJu82zCmCR7XSCfdXzhn5bYPCmbUpX59eej6iRRDAcGfDiIUhtt0mf
A0Juz9m2GpOIBmWVwZCWDtlnuzQfF/ryjnAgIaY9NIcPr+pWwS29Ueb6XEYsV90BZVp+Yq7mE0S1
10gRvS8LZjfJSp8KczVA0Dp3grvnLZ5ulWfDnRYG0ZWTnHfiDQAVOpTMO6EmZ4m6TNGvHsz1XUYs
pdCgyNVXmN3Duh2wRAtA7aaiIB0/Oj0HYMPwuK+IJpgD4mW9esDyYZjsmLG8HdtLGkKgr2MMpn5t
xzJ8GMgf6hfzkjDxBmEfYpg/01zBaV7fuF0isxovTE9I4E3L1klhlaUhUW1pp493Kroo4EqNX30U
o+5rtnd4UKDEw+2kj62JTEi8RUB52fke1z/1A71Xpkds32/y7iahAlVFGLJ9/Yh03l6jFH56uniS
UJ/S1beCp4QxFazs6fGuWWOE5PsNL16NO/+eKgD11j/xNymZ3n3y8AHlVz0yw8qe+mSVcIU4aVvi
qcY3pqbeZnK08X22FyWid3EvdZB98I3tGrTJ668f2UjvF1ti9QHmfBHVRyBJoeH+cPX1FV4+j+iz
uZ7I+fY+elz/vgB9CqIxXVvsou7LFAzj6RRieLnHaBTYqP72G/Of1qsdy9r1kYmeMZ7ls6XW8drB
oR5vVHqv4pYdO8mWvH14ZqYXdmioE/kK1/fI6DrnUzusrFsg2zFdz/p11YfTQxvkJn3VFJ1Q0/LI
SQPIHleXaIj6fJA/moEGJ3DwtOdZPqDX1lXCJbsQkrNPTvfkkKIa0RcxxaDTh8RzOoDUd9lW6Ew0
ruqNAZ0aJMSmuzHm46BfYdtlAzF0/uX9dHyVUrnFBc7rA8/H5CzfwXkqI/Eno8trIiVXpFaEEGsn
t5wNn4uNziE6EsLTZVzb7bBBK0k8UGm01jnjhZNBv9RqvGg76jVHqTeRVxRnZl6OQzRY33UDy1Z/
4Y15cVv6tc4VqjhpmWc/cT6cl3EFxeqr4+ztb9rxfbgfkbdmGtk367alk5U3EIXXB934vpsPheqU
yHiahBjudM65p8gYVT6EFJDURZ3TNQCU2Wu2N8ftXE/CHbbSUsZrU0lbdqpoJ5dVllPhK4Vxj89+
hrZvp6PA1jbns/+BJlW3xAlqJxqWt7cJOC+WeOEqojct1t4d1Bt7/tH7qVdTgM2VdlSe/dv49WMb
9B62dBLhzbueqU9Y7y2PkfX23Q58cH2lDByF6D0U+a/e0T3fq4wU0rGd0sTTYMdEmTn9K47YLj6b
8mN/PJD7YyeiuloVsLkl0ZauBTvPh1d1qFB2phVT0UuN+HePr+hCM4E5/GPwLhGiFOonVYjxsr7R
oLdvX7m+jxad3v4mn/tHAGa3aDCkeyeiZbGnyLmKXzzaEfM+psI7cEwnZ9aGJPm0XVwCOPjjmu0l
M+TjSjsOKH6LSwxJ9eZjZd59cBfTB1eWlHjTJU7vSno6XJiviwKf0lxfoHOzj+b9zHReX+QKXbbv
CI+hjdEgHx8bqEVVISTJNI9P6qaBfXgAph/2fitW1agpg15+mUPliI+R0RRyAsaT2fmzjH96qFhv
JtBi7dF41LpPhdh74RKzrd9e9+snd7btiJZ9R31Q77ErF6410PuVFjE/ZZ8JnWRrR1SqFdEwaHsT
tEezoAthLFu+aX0fihRsYteRGfVDkcsg5HKEkS1O0RTTboH4JzwSp7++o2nYOiJszG2Fryx1+MwD
qryVFJnsrsc9GhVNlSH+pm8Ky3fV8gPFE5rvM9WvUNRl24sP6pT3mFlqn3f4vQXw72uKFztbjBls
iAiyaG6ouOwrbzpmIvzhGesyMn3qzHqD1q+BMM31ypiPplSg5pAGLBRhy0U7MGf9ubt4LfcuYuHh
0iHZ3wlMpc4zoq8gDSAdhoY46/1bH/PmIsA9JypdJvXD49HDA7RNO4Ml9qLg43HxLiEZhy27D1M7
r8cdg9FvK6I7ytHjr10mgOxVPjvoktUO8/5Avief6JqtRH26GlMA8/rhNM94PK4t20TblBrMmrQb
mkZkC+j9JJyWSFvrU4JIB7MeES0jfTyM4RT+4QkhfrUehRt6gG29C+JH9oD6IqvucBztGp/UoORs
We5UkGvrzFSFL3R68BjIR1H08MB97nWF6abyn/tXb+v14qevULuWRDqeZQl1zDZVGFnL2N5CjHcy
2vigLaIY97pTIOavxxDiznkQLwkVr4eUPtDiMnyIPrYyZ61Fz2iuJ7wUvKBdCUxSYeYb4kSlEFXF
lWK0l+yImZ/8ko/CzZWlLXljTNd0bMf29aKotY4q274n2vawfpSA3oXHduYKe9PJuN3hoqgfsosP
G0TjiyhCPa4bog+OHXNlCO7KOJ0FukFu3fJFtwcYz+s3HqtGj1Z3KtoAKXbxcB+qeHgv0wp++rmQ
F67O0/NVhZkPiHU/i+10U5mIlkqXE3xwJU4TZHUw+x+m7z9DNLyvuyNcveUCS+v92xu87FLCay3u
mf1WJX2qfWeBYtyF5CUPa94eywH/eItyrXznvEfFXdHFISBmLxb8j16t72bPvMTTvb4BPEFcqiEj
lbPXB93Yb2C9DV7E21dKTNXYvMJrv3KIZ3V5PChYEJGTezs8xHanD4/vZoK5n7AQ0Y53wbedUPHd
hcSr4IGmMepT6TB9KmamNxsNK7HT0MwvONtuJH36UmmBzhfjyw6xgtt0M+oLcKWdPvNTihhlWQaf
w3uBefqq2mFd1BiwHy7xr3/R8JG4SPUZ0HhfJdEYxUGn5N5rZFqSaFFHj2QDi7QUCDa91pv5vkCx
my3p9A01T5z7pVRZwYAX21vqMVs5nEEZHiVxQcB8rK+KLOtrssECWbUx3ZHPFcrjKZj7o5rz/LyR
YfX19sStVDUW0iygytspNsw4NF48rWwjBbeMl1h4ZKq+6hoPUBU/dBYFKxuJx0xcQHnXJuIWV8Ob
uuSq/eGR4RrG3rRb2xTabfUkqu4t9dqd7p1sF28Ty/SocPZGlzvk2dn58VPbVOjYId0LFGbNfqqL
+5EqdkVvVJQ2VTw92V4ABy0jLLyPnjfRfBSVU7m4MYtGeTsZ3fEB5LR/sO1Hs7xBSwFAq6uUPuZ+
xW3ldoSzkI905X0lnSc0miDvqhLLkbqIG6HgBXwfokGs+ObF44UmIdD87M1+q+SjVOIMITf0qGiS
qz6GkhiC8bkS4pNUaKnfrw2Y9ZfM68+76HJ5wHUnCmy3aTNv+JrtFe2dV0/FXiwQcx8XAYXIfBCr
eupoBKwN6JneVeIYdInYKlphmPsTc78n5lGWbDY/nmZG0mHU5Y+hgdm/MVP5PvP+Xq18+WC7wIyq
0FvuHdMrcD7tmFWZ6o8PMjk3RZE5XKnayTc+AWgb+8Cs+NZGzVuNFz++xdJoXdpJ1d4DDHrxJVsv
uHtzPWRw7/OECvg+ofGzMky4VOuQWR5r8zrIvgGUvfdk7uxfum1rH2HWbzyd9w4SnuY6AB2kO2nZ
vfFYQqMBdpva+cOn/CtnFeTZ0cErmMAbGssPkb32XdwImo6GsxRSCBThySKzR3ldq7dA5hIizFPq
MZ5A359hcZk+7Od/uvNheYePVnyocH+I8fy+RaRt3MPMRx+PTY32BMsIG+I3Wq1zVtcF/NZHlvyp
ZU8vn2BnZwhPy7PjibW/BVQ+/R3ZSuKprV/UuUJ9NzWirraR1/OPP6BfHrXzvpLHn5KqobBEzpy3
xREdkS2i84YZdPxolj7u9VRUbk2YU7ToSm9wlav9y6+Yx+6NzkC8HaHSrYR23+Sds09ypzAs9JA2
J7PSR8tONXhmpzWun+YuHtTRCeDZ4Y4RxLt8+L3v1fO8+NWP3rBkIwNnxci0tR7zYWdLGFL/ZTPC
Rkfng2gclc+JHX5+PG+QmGVKW3B31vMgHl/JA8OSX01GQInbMamfBvQC9an02R29YTHtO/SrD4YD
0xuwVJZAklLDq8Vmlw8ftDTATVwbr+tqatvK2j/hdnieyH4NOeLSavNQ5u9P22ql8dk/iIrImi2Z
81jeXy7MhmMt1n/8MLfu9hMu99ObSqHx5vyzWp7Rn3obxE00ys7rgcwOGmZMqRJToeAlzPxOl0Zs
xqu9Xglw4+aEJzk6RjRPWhnoUbgztZxaTuNd7KJfPnj5xpjzilaDfJXdEx6er2fLr0G2gPxVG8x4
1HedG68qQ3O+wfZe7utsznfQzL/M3Qq+PnT70gRnJTjMCY0tGossvUK/VGumjo3aTrOeIxdXlz96
9nu/cmryEiN2d/X1S1AKFAuZzozVsEIcRfYTwtfXIW7mqfHgpbUJb7Q/EUtR8pifvXUDki3IVNlS
P5aukpPCz496hXtAvCmSK/z0wi0Giw9OlwEcNgeXWUlJ9EkdIQXxEsWM7D4rb/b7GHnKtSCE3I9o
2rziTPrlu1a4nPO7c9P9yRdMk2w8pmhgAz5/E2aYapvToEwzFBurhM37PRrO1SeVnsampxs1a9Dg
KoEL+en8xYvPq8mHOpmuyp4/RzbnLy3DxtEA7AdLpr+EdcS7ZukjZJQ1Md/COxqtcRTQuiqedPFe
02hkt3UlN3jlUUrBbMtHVahgN7giZv/cesLsH1G7fmKmwqHNR9tICtSk2nb+fnL8vXxvBhikfLKZ
j1H3W785r6Sw6ireA3YH6ee3z8KKt1xcY/PHl2wH00MfJP3bIWNJO2Y+o6XXB8aWQi4qBtlTq/DG
OLRMAH4U8UCdRdxvnccEiQcBe34sk08yIRg+rDTpMn5co6EfREAG36h0uHpvj4XVToT5M9tZm+yX
dzUgJa8VnlhXx9ORdzbMfoGYsPP1OX8Mf3nIH32lP17xtlKIr29V8liirlPwQc3J67B8xMNhuqpw
uj4rPG5iRR82a0NGO219w4PSFfog3oQFyk1BJPsvITHHJyeA2Z/RrlgK8bRITwHsk1bFt9CL+QrS
8oEKp7Lx5bX95tw7VncpfH0ctrWc1BPn+lB+ftDbvLR8/eONHx9aO9lDnX4X1D952dZfpPr4zkUN
YJ/vmKHHhjfUQpWhJcMTMXgeeoMA3QIam27xuhC/7Zw/qvBw7AUzlp9VO/7y9tl/4XSJxfaXl8BH
3JjsNb/vds+pgHJkX8iuoCuPXpPnBhG09Sma+0198z4hfC9uRox4jdH67fsNgKRdiSO6fjwx8ZT9
/B1u2d31KH4rGH58uqVrE/HooS8A27uObDdHXec381JCsUQpXTyEW56el3ED54v5ZWQZV3wYfemI
JlX4ULR/5zFr2baBaeWaDHdPKR77/XGD6p3MmNdOez7MeTpKcrbB1zoqo498NxpIxYdPwuW59qbf
PISJnx1RM1VFYvn0B/C2KGT6ejTi8WHu8M8fUtgcgmhM6rMB67VBmOebDRqQHWvQd5cPXlpq3xY7
LNuArcbA35Nv8U7ajhjCkXz3Y9XkcbNIkwAitvqQbfxxOd3Y/V2+YR5gYdaDPD1f5/V878g+uZN8
Yi92R7Mek33Xta3wke0CurcsU6nqh5gvU9+UdHTz8E1gEZ8MZVsBeZkHvMRBqU/T8zAphb4oiZdf
zXy6uNxHRSgtmeVPaVvN8zY49bt8zm/vfDq69+PPz/+ZH7GZn8EZK5GOikm9eptqG8iMbUGXPH3F
/aotS1iQ24LC4sn59FpyDVxjzBieWuBjJ0su2Jpcs5+/44cuCJSZj8g8r2rpiFRRnvMndpFPX53R
5+oM83yGuYyd417r+gZNqvghhlB+9cnv1AIZq/Wa4PboRvzS3zVY7ssTs1bNWW8gmjKQR+lDLNti
+Z/5BGflyHb8tPyT78MCw5kO3ZLk5W0xBsrn1B+IaomDN5DHOoWX7XyxpIPQsrm/ovG8etPJOJd5
1e2pCdMAnC610yMf7vn+AY9H1ODpkG3mPHIwlHVcwG//6axf5U9l5kc8Zd/Df+UJzlX4Mlcm32h8
UeeOnoS4mC+GyBO3LSpg3LwWtFfGnI9NnrkQ8HRL/PvViadyV9lgriObbKPcigRfl1UQ7aNGdqja
6/1vXvCM7s953lujztSGUDGu154d9knC26/w7v7kkzMPRTMPCiAZ9Zfs/fLtDfHFK9C1rQqizvwy
0qvjgqYd3wTjTxiPhz7O0C2NKW1+vCMTywc/rxX2mz90Uqtryno955XDouEUDXUJ5JVLlItnDw3u
CUQUOcWe4Xl/T2GvnZHqh3/y1qjs1QqUMvAUKvcu8djv+87+gWCHW9GUrlgjK8H5Suf5V8yPu5MI
VmJssKQ4yKPe+3FEqtirDL9CEwnrPTLR1rkbVFk9xXyQdEZRHolXzKWFrQvCPvShacX+N++Ixpf/
1UCBQ8hILi7z7rlTB3mJtwqz1jvajl10EgAHz4j8eORPnrclOaZLt6p5mSf5RgaiH3Gd0G0+zjyA
+mZ1xlKEh3zgjROCWfEL2w9HC63G0axgKFlEIcidfHTIVoS/f6cC/vNff/31v34nDMoqeX7mgwH9
c+z//d9HBf59T+7/FgTx30z8cxKBdvf0+fc//3UI4e+6rcq6/999VTy/3d///CX+OW3wd1/198//
c/lf8//6z3/9HwAAAP//AwBim/ij4SAAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7bf0247aaeea96de-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Fri, 28 Apr 2023 14:54:47 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '63'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 974557ca996688f2b7e568f6d601ad88
status:
code: 200
message: OK
- request:
body: '{"input": ["Assistant Reply: { \"thoughts\": { \"text\": \"thoughts\", \"reasoning\":
\"reasoning\", \"plan\": \"plan\", \"criticism\": \"criticism\", \"speak\":
\"speak\" }, \"command\": { \"name\": \"google\", \"args\":
{ \"query\": \"google_query\" } } } Result: None Human
Feedback:Command Result: Important Information."], "model": "text-embedding-ada-002",
"encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '483'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1R6WxOyurbl+/4Vq9YrvUtEJWG9IfeLJihesKurCxC5iSCQBHLq/Pcu/E6d7n7x
AfFCMuaYY4yZ//jXX3/93SZVlo5///PX3+9yGP/+H8u1ZzzGf//z1//8119//fXXf/xe/787sybJ
ns/yk/9u/71Zfp7Z9Pc/f4n/feX/3vTPX3/fopdD0flZVNPUPC8yT+sT3gt8pY/uh6XKIzl8CTzP
74Rhggh4m3hNHTU6VhN7kC18XyqDotgb+zmfIhk+DKen3kq1uXg3HwJceS8R25JtA15/gkgZ7UnA
TptGFXfomsGpRFfq7XDr94G9VxVzjTbUY1RLlu+XQXJ53LE261NCuyg4w6QiDZHiXKrKTRMg+KJl
RuBWrkPyiQQGU2j55ASHhnM1HnIomOyK/UP46VlvDDGM50TFfqDyiqUNOyuv9Lahhye6hzPnWQTX
0DngC2N2zxHqDMiu8I6RNGxBvy4uKRxflxRn7lr0yfd0G6D9kXSMwuTT8yG7s13IOgHbO5FzPiY1
gUPXrSiGmtF3X3RSYWolBLEi6qohbVigrDwhJErOdX924KlWtIgcqepU34SIL3sLt1W6RYA/hpBd
XlYN7qJ2JXxb0rAtJOBAr1hv0EbURZ88Qa3CO68k7Hjw40+nMUeKEphbspncgg9QLSM4xqs3Vldq
2Q+N4auwuBY11W3fqDYwfjLZQOOZ7vX+0k8nZT9AZH0YdYzjPpl1ZEiwdpQNUjYbLZwf5gZBr0EE
yXfb1ScRHBFEb4QQUJTGZ6etZ0E8ZQeq7nDJSSh3AVTF3UR1cQQ9f/Rghrt9s8HqIbgl3dGzIazi
zZaqvQjC6fM+11AAnUH9G90l8yRLGdRbsSJSo0nhDx874yTL2DtEVcjelmDJT1utcDaLec/ZfmvA
q9po1EykIZnDzhRhfMgDnJovE4xm6NTQEsIj+azevJpv+HSAard6oM333erTls0GWGnUwK7DN9W4
G0wLNifhjI3tM+x5FRdQcTV5Qq0ajRV7R48cesn5RtYAv8NJj9sWyhZbY0MqPsm8CzADv/U/lPtJ
58q9HGQeHWO0RpHaSyvCNeW9eiUYqXcnmd9lxIAZrwKkMA2B6f18OHA9SAoBtI56LhFPlsVTOC37
U4SDaHcGkG+WgS1SQp9kzrlUhq5fYZWlnc+d/BLAfZhjanrVwOe5oRA+SHTCqLmvknnclNqqt8SC
PrEE+Dhf1Bk2gD6wE+GGz1BKLEj6JsbHfez5bGUypnjl7Ua9ZwX0Sex7AhJpeyTysy8BewRWBpUU
vwhJd2FPuDHJ8Ni+Juyvdmd/krtYkyV7ulJNqO468UPYwMo4R3SfayEgD3vnycA/a9R7H7Vqs5dg
Bm/9l1L9/IbVpE4lgsVoaTjQFKlqjuMjg7f0nqMdqu/JFL/cFFTxekujyH75v/oGIJ4q6th2mHDz
+4kg/OgFKRa804MxEvg9zh49qmmt840Xy2B0zk9qW49rwkmbOFAJ6js2d/6pkqxSJNAs9YAoJ6r3
kzp9EVhfZwGrRdT1k51oGiw0KaWOcDcr8aoEKVzwgrVCf3N2TcNSMYm3wQi1Rz6JwERwT9071a/p
N6GMpY2MOqPBL1xs9bFwijOM5Rbj+4m1nKNMa2C9P9rk/TG3Pnt9DwN0b1ZEfXx89Pw5PD14LjSb
yDa79mx3r6Bsf0Qd8chnCZ3dqYSZc3FJMSeFzp6lHgAxkjC1nncjHGCgy5CBtUXtjXnkE1lFIjSb
yMbJBuX6JB7aARhXfiav/vjWp1XHY8ir9kQtfPLAdNzwmyy9wQXJFLbJYH0DWV7qjXqm2PXvfDgF
UIZ9jTjTuoocTm4OdkOFyMZ4dsngzucblDo+4MN12FSD7YYQipvvih78wOxJJq1UmCm5TdXhc03m
/KWk8NrnD2qiSK1Y9Vx9wRoRAXt3u/PnPAgh9ByLUkshVTIJxz6TrbI1sakIfcIOm5xAaY0qatw8
J2HnS2wASyqf1Oi9kPPrVhXhFHfSgvfE5/k1rJXUehBqrjOz38jn9Ca3uxWhe20kFWvRqoRSLECs
CyHWqcwfZ+iO3p2sG/us98q9JPC7YgVF/u7DZ8N/zMBxpZii4/fDl342w+YxaljXbwFgZeh7snLJ
KMY38wQ2xf5ogU14KxEzh7QfLRqVcH1lAjZipfQndJPiHx4w/kSnkHu7yVHgtNphu2GXZFY/6gVe
lCimzjXPOVvpkgh2R5LRvQwswPZP14Af/J6wrl7uOnNuWgzkx0vBfgfUipw0N4LwYLvYzldiz/N6
bUH3ARn1TXoE8+68J4pyjCk2Nd32e+IGDsw3jzeNEncLSK7OKRDM+UpxbyrJV+G5BovhXWEru36T
WX1bMTQPTo5NfT/qfE4tA0p3o6aHVef5ZK8HN3jt9zLe36ioT+uWfeGzEABGluPobGKrA6wvMcH7
9GqD4RX3B3BzvxE+HmXK5yGJLVlzYwEfsVD1VC4UCU6NMWLrLEh85CwgCtiCErvtgVWt8662CkPy
HW220hGw2yNP4b19Z+RtstDnJWEZHJPMoOb82XDii1MOtnPYkt0pVMJB6FoGX5K9wobmlLwvj6AE
76AdcLgtcULWaSrCQGq93//3OV3vG8iFuqX7jvOeDF/ewMIaRGp06APGBDxi+MkOGGuf2e8JausU
quQWYLyJJT6sdEEEXzMH+IilBLBG7FKoSdIB47TzK/4ZX7J8bJ8T3UvOUZ/XIzsr95paGEXWA/Dp
UNfwZGYyVY8oDbnnngcZefCD1dqmVafvqhaEN2Ri119PITkmkgZHVUfY2vf3ZNA+6xo4JtTwq6qU
P3gBjzL40iDuqorI2daD/WnekTk7RDrHj2sDz+28IcL+SkJO7NsBiunwodqqqwFd9AJY+IKsiHzS
6VtcCZAe64nMq67m/VWytsr3yDycLHphoNtXAOyAGdT8mJE+m1Y9K5aRW/R4j5Jq4UNBeU6Bt/QD
Ho5XJrYQN/qVwPbtcdFD3xvklTuTebN6huyqRCmkFVSp8RxafT6dShk8DK/HnpRrfp9LQQmOg9hS
zXicQbeK7hDGssvQSrXVZD3Z8Q1UzZliJznlPW9ldIH6cevTxM+PlfTZgQFe4l1NUZ2EIa3iQgC9
9vwifo9uIV+d1wN0+LukjisUPnXy9AzyTfJGz12rhRsv+Zwh/5QddS3jVJEvbRtwFY4Fja/6OpkN
/8RgkcOEquWN9ryYqhKa9tSRKcovPquemy/YyuWGuhp+Av7a6Q0kJj5QdKhjn0MQE3AKkvOfz7M5
K1vF3mdrJFbtxR/vMtOU7bFxkLD4iW9OMwi6NKsRsPsdmL5wXYI6KG/YfzQfn83K+6vUczki/s4o
6OazJcK7NlBq4Ab1FMZXBndPvCK7avAq3s/NGQYX94JE1DoVHQ86gnorVWitKrtw8gwnBZMInngv
tGdAUeY10E6TA2EGkXpuRbEE1fLQIUF/lDqtvnomE9euETDpkfO19kiBgI4SNaIdCZlyDCQ4Cv6B
VGveJQufDUr9MSVq80DW5wM75NB9v3dk/epVf1P5TgYPfaVR1RxgNY1zGUH3jFTyDty8mstdfIEn
93omKVKmnt22fQZKH+3JdtFXvVa+BTB8nYyej6smIQLeavBPP86uXjgDr/hC13dU+uPb8Zq1FsRM
GSl6rorkexxPKWisKqF+EYZ6W9HEgJ8zf2C0PC/fWzcV5quviPXvye03fcAOskbnA6psHQL+rQz2
01dYj7Ghc7ZnhrL0M7wvnKKnWfc1YBz5D4zI3uK8liYZuhZ3yXolbxKiWnoEvcm3iLTxumoQdt4Z
yKh4Yzu9lH1XWp8bXNYXY30/h81aOTEQnqsV9fy3m7CWwgyGxRrhoxYFPqnfvgiFDRvoLclg9X1d
VajYwWwgmB2UhFy1coaege5k3uzvCdOvnQrXloCwxa9rPudTICvZ95KjMeiuYMr0eAupOzywfau0
iknnaw1HmwuLf+l0XpJtKluqi7HabWky3Qa73Sr0YVGNXvqEi+tSgEUtcuq/HQu0YeGUMKmGhh4K
3dene3epYXnzHti5ZRYQv+4pVqbGGolSwHVS39csUqrkcscnMxWr+Tg+Ujjf43B5XqYv+BTBhMeM
rJ3XQ59zeoNQf1QXtE1Oas/w6uvB8+PVEvDMViEr3i8ChgJgqn7cLuS1laSy3PID9jZZ69Ov4c7A
V7MrPSq5XI07QBoonk4T9fvNoZrCyzAAKNg5PeZc1zfppDkQIWmF1lc5D6crg1+QEbTFqBiO4eB/
5zO8CoOK789zW01FpLGfHqUvTR9ClkonBuWnJJLVTGvA3NMzgsAPNDQnZ5NvhlH1lMva9ala27if
b8U4w5dwRxhd0L6X/HlVA/O9a7G+y9fJbOf7r3w9AICNXfKouAtrBMPYelJPUxJAgEMH8A6+w8/v
6iTqOwd2VVkTZR9//blB3hl0fXghEzJKfURrJ4DgBnwiJsDRx/M2HaBFFIEiDxSgrZNDA/eVzjC+
2j5Y+9ktg6e03KGJ3ceEX5sxAj2fAuqxj9yzraeUYNH/2L9OPBykYmWAvhZybNpiwydDlFoIJmem
etFBfXxePxl8SeYK+9tV7lN19ylhaW08/OO7yQsLAQqrIqWaH+Qhf+ttJL+PT4NmdcLDrjWuLZS3
VoL3F+74U1R/Gnh+Si/qnkIlIdnnWcLr3DywRm5myNMi/8Ky4CX2x/ZQzcXQbyEP0i8+2n7Haez4
M4Ty0aHGuioSlsR7SdH2X0Td3SvQKUKFofz46YteST9rdD8r7vuzI1BvMp+l0mOG2zTe0r0VxxWr
/d6Ac7MTqd19Sp+VetEqb34XqRraFZ+f/rGBw9qQ8D7UHH/hx/inf5d6aQBrg10GR8E9/MmP6PMY
e5BI+Uz44vfyzctQ4Qd/JrLkJ/1M2R4CdR0Fv/WrJrR2znCpL6qNwaYf7N0cw0uZ6Wi3ewX+FJyC
M2wQ+yJZUxI+tbOrwsW/UJfv0pD89OAFKweyWfKb2V4ZX7Dw4+In8op9wroEBtycSOevp4QDUgfw
tK9LqhaRWzEY+FvI0PZOnyda9VMVPmdwRlWAJNq6YP20TAFKxKko3ptW8k2FlwX53k/QuoNvTsHL
Y3CXJx8EorbwGWbqF4p7FuDjwh+0e68kqCZOR7PFH9DKRTl8m+RC91apJZLjAwdepWzCtv7FiYhX
XwfGezPG1Pk0/vAgwUVxt1OOzZ9eDbMKwuzu7LAty/uKH9aGB8LCe1L19XH7GblFCpf+SJ01jXzy
MFcHeHkFCdmF98mfrmqG/uh999D6yXrlbSOwZtobbR/HAYys125Qr6GPo6P0SHiIvQyGt4NJ1be5
rsgNPw5gdZV7ii2p8VnQJy1Q3PJOVoPw9kdlSjLgPIcZ+3TIQ17sTQNOZ2CRpf/3w7e9xPB2LBlp
9fiTzAA4Bvwo6mrJD8uEb+/uDdqCrJFNFqCKb7qrDFfaaGBLp76+IRuxVcJzsSJzzU5g0eslgNKF
Ud3WU8DKcmjBJXTEBY8fzk5t3gBzZ2P882stuxWHH38TLo5JP2fSSgMfBiIEPe8O6OG298BJmnMC
+09djY8ClnCvXlp6cdeIr6P6U//05sLXjr7x3DOB7pbnJFj0Qz8pvgZ+eaDdFmLCrY94UWjAHtiL
tq4+tcazBbt6zqjx2FySce2RGTBtZtRY+I1rM07Bc6fJSIi9Y8Wcdy/DpX6R/D5qPTtfzobyW8/5
TTacdclbgB7evAgrb7iiJWEpzMhhS8JtSZORZKEMxWcSoQ/Y2n2VFnkLzVgJ6GEKIKCwW523Gyf2
fn7Cn9/B+gYvShxjx9Vf/XSXmQqgmu2xO6y+Cdsn2IPkgyuqM83tZydJ/2v/+cOVwkV/WErIeoFc
jXn02TYNINxevg6CU1RXk7wFOXgFOCDi4qemqClyeAR9hC3adqDwwk6A+BsfsHk2h55stgMDzenF
MarY3herIDDgq28tmsnFoVry7QHUqZtR20OdPifqyVA0ehoJS98YTMP6mQGLrAR0nboTEPW3V+6e
O1XG6g5rnMBm10Dn+qqoeTYPPVj6Mdwk5hmbxjzqTUKUGrKrcEfwzvpkxNpHhhGCA6lNbe0Pevgx
4LGFAdWliSZTs/l4MBqnlCIlOFd9/EEzIJcyIehdFPq8P+9EyE6fHCORDWDG710E05P4xv5bfenf
X5658DkR7Z1WLev7BdZF/1DD2bSAd5vkDIon41hzD0PFSTS1ytKPiOhsWj5/ImkGwcW/0IPeCDqf
xiJXyt1Fxd7FxOG08li8e1XRhp7bpwLI5TYFUEubN9aDzEho+bxqsO6zkShouwaMNRqBi/6nP7wv
eaMKNcoOCBQh19n67jVQSY8vNITgXv34E17lq4s976Mns7nREfzVozU0gS9mQnIAPz1gy/ODj5E5
NPIzs0oyPZqCT6SwIOi3no3y/Aw4rT9BrAD7Df/U2+K/blsihCWiggs5D7NegCczlbHpvhveCccq
hc5GeGC8+DeedaUFsyZzyIL/amMIyhZsmtz95U096y/m9sc32MfXIpzNmxXALmoOhOAm5Fy8EA1Y
nyNa8Ff47PZoU6Aaxhcb4LLXf35febxsjYjNa883lYtKeAtIROZKufNf3gTAbefjfbPbJ5K9OrRQ
aJ9P6q92s88jQkR5yQvw8R6BaoLnIIP4EByxrRQvPgHzq0Hqkgda3/W5X28upacseTJ2dvViR+s4
Anh9qbEZiXNPkXKsf/MOsi15Hs6K7jagTTyBVBAn4ThqPpRHIzKJuORl618e4e2iKwFLvstPpm1A
4po1mvevvGdsPRx+9T72XrwCVMqKCJYjcKn2RLM+7tK+hukTHn75lT7fM5lAK6lNjM+RCUTMnC8M
+mOI1den63nQHLRfP8JPciI//MdgiDcYrQwlq9jZ2DZQ2Z8cNP346LtXEOzcs4I9MSXhSFaBBC5f
xKl/2+x1vpGnG7gGZkXNtLNC8XZv8p+/wE5kr/xh28sZlAstxX7t5aD9uo8Ymm/QUj22YciNobvJ
y/NgVZEvYH7nmQRHIzaxdcAnf7qcpi/45W+4jP2EqS/owB24CtQ+grXfit3dAc6o7ukyvwIUf3ca
XPQD1cV1Fc7+O9LgE/GQ7Epi9O/Uk6FsPLov2XkbPxlfwijJhgVLmmrt1v+jhy93/UAWfusnn+5k
uBeyBu/v6z1YL3iQf/m0q2EF8LRov9A5fXw0XMip3wj65QB3/IPQz9/x80nJ5R9fRsFo6zzEWgbt
z70m4rJf7G1JFqw/toRdcbglE0FyDJEnfOiiRxPuufEA3+CjU5X0nc5uzlEFnJ4e2JjI2LPEIRE0
toGz5CmlTw77XFV0UPXUaF4Fnwxp28C2sW8Ur7Mvnw33LsFfvnPrNl99qsTzAOzPtcY6vFf6/KrL
VFnyanr81hqXztnppqDMQfSy9Asaf6wZtmGE/sz/eHx/n5W2RALWssNWn9nhAQFODAUJs5hXjN0K
pHi7+EqPxiVNxtXuIsMXzTMkZus4XPqTAIVnZaJ5MvtwEgaSysVoaPiw8P3PH8BcEHTsxP4ZzOte
uoFJcQZqvB/in3xFLnc3lTqfTqhm81MMP/7A2Pk0+h9/u/Q/spXGTG8Pm/bPfA6j74H7LdCfGSw1
YUYf67EOJ+MKWji/fJ2i5v5KFjyKwK/8J5mcsEx++wUWvqUH+GHh3IVRC08SywkQwMFn1WtqYNKe
KD7KTRvyylfTP/MziyVLPOCPX+iIxp5Gy/xrWPIjeNnlGtUuyTYca/oeYHTTBvQVtAIsec0ByJr8
xD+8MamaNFCez3uKHodPz9GBer/8Eu1OyVuf78qaAHHTrvAprm1Oln6m7JPzCmOAvWpCdirDjb3r
qbbot0XfB7DP3x3VJrNPFr5BP71O/Vp76AR09Re2hxvCep13YB5cqVWM2HOxpykADPpby8GSj5Lx
KFPQGaLQ/vILROvbvR9Xu1SG7/39iarFn/LAdlWIDU2gP30+uRYo5fal38gUqCScwNUv5V0gW9Sp
6zxc+t8AHFPQsJnf5nAUSB4rzOoc6l68Xh/HSpWhDARO7cndg01EGhGyUbthLQeZz1dq58GVSBOy
zFNC+tuPxf+h0yOTdfYaNjNcdcYZe1tXDtn+ubeUhhknetklu/4rnvILNA9eTo3mLvvvG9k3yt+/
UwH/+a+//vpfvxMGTfvM3svBgDGbxn//91GBf8fP+N+iKP2bSn9OIpAhzrO///mvQwh/d33bdOP/
Hts6+wx///OXIv85bvD32I7x+/+9/q/l1/7zX/8HAAD//wMAPe/DkOMgAAA=
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7bfdedf56918cfbc-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Sun, 30 Apr 2023 07:04:15 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '25'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- f2850337441001a8e5bd70bacb4dede8
status:
code: 200
message: OK
- request:
body: '{"input": [[72803, 18321, 25, 314, 257, 330, 61665, 82, 794, 314, 260,
330, 1342, 794, 330, 61665, 82, 498, 260, 330, 20489, 287, 794, 330, 20489,
287, 498, 260, 330, 10609, 794, 330, 10609, 498, 260, 330, 38096, 42914, 794,
330, 38096, 42914, 498, 260, 330, 82, 23635, 794, 330, 82, 23635, 1, 257, 2529,
257, 330, 5749, 794, 314, 260, 330, 609, 794, 330, 17943, 498, 260, 330, 2164,
794, 314, 1835, 330, 1663, 794, 330, 17943, 5857, 1, 260, 335, 257, 335, 335,
5832, 25, 2290, 11344, 37957, 25, 4153, 5832, 25, 44921, 8245, 13]], "model":
"text-embedding-ada-002", "encoding_format": "base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '594'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaWw+6SrPm799PsbJunTcCKl2sO04iB+1G8ICTyQQ8cFDOdAO9s7/7RP87e2Zu
TMQOkqLqqad+3f/xr7/++rtOiud9+Pufv/7+5P3w9//4XnvEQ/z3P3/9z3/99ddff/3H7/P/W/ks
k+fjkVfpb/nvx7x6PKe///lL+O8r/3fRP3/93fe7HdujKCtm9AaQH459JDpOlmEvSuldKTb3hq4y
9EnGJcUlek0rkR107xBO5h6vIbBDk1n1aujmt2ovYMrrgXkvfcdXzeomQ4hDgdhOt+um6pZGCuyU
BTm4LOLjeNqO0M6XM9NKpUS1p2aqIhT5ihmvte7xp7SQ0USOV6KdmykZykC9QPFsK8rTi1583uWI
IYufKZUDUnBa3+gI0C5KeuJSyadFvr9D5/lnYsJcdZNT9xfwK2lJto7AQ74U/UDBMV0xw3hdjTkX
cAR4Ye5J9H1eHj9uJjzR/UoscbNJmsPSTCH11ndyDkaOaOmWNXzeF4PYRK66sehif8NOtwXBgsL5
rJ2EFqARl8yL37uumc4pwFHiI15eu9ZgXqX6iolpSJUjNbxJuWlvRTeeB7aPk84bLuJOAC4JAt5U
6z6cpEAS0DdeVOldxrvD6NmQdQcZi6el4A2rSDAh9D2JeOGq8qZvfJTQd6RvvPJiiN5zBLp+Lcnh
qhQevTAXQADxzXaX0SxWIjxOco6OAdur3tmbPEmrf8/LrMtG83i/E2Q4GZs1/sV/ctqdC6VVThiN
smNMt/KBoXZjDy/cdenNu7XsgmjJe+Ys7LwYVsjxoRs2IztICkrmzyeZAWvziliefEHNFao1nM58
w1yao2I+yPkdAu2wZYew2ySj40tPGEVoKdxHqZjmh6bLZBfLxN2ZhTHKPr7IaOEX5F4c02QcPmsV
PsFTZ2SZ9Al3F+IITSv45JpgEw1NXaewyQqH1hHwYjx0kwmLsoqx6Cm1MQPOffRslgYxcLvibPqI
LrTbNiD2ywu7WdpPoNykmeNGDIZwPEebFDovOFN5Mj7FuG6iGmK7Foh3saqEj6/lGiXX6kN2NZ2M
0SFhLU9IiTE3QrUTptjQlfmxSggeb3Yyyqd6RPmu8PFY7XE3V/PGBH1VAlX0Y9RN92tbyuvja2JO
TzPObueNikoLm8Q2kdIx9aM/FefxWpLt1a7RJHtvH6Rn5DMj1Hs+L24Eft9/9Z7M67WuL+kFMnYt
d4jT7ODP8LGTmJiNUxZzFCcWbF5BTDxFcrppWKq9Yjb5me18IvNJdNAC1SsTU4k/8+7P+3KV4kpr
MTt7rAkzGQK7m8gObY7dfJDbpzy+Hxfm0C4y+vjUl/BEzyvDghIiSsF5yp8u1r/5oxerTwxPWDoL
xrAtQjGdcYBhf8A6uSTbVVF+18N+Yi8sBdU1mZnRvNGAtDWLCvbyJvZZRCjZDu8/95/5uTpBPRgl
/ZS3E+rH02EG7F5cZhru2+CW20poI1t3RiJyRuO8TWx4ClFEtGR9DFft/T1DjitCuYSNbl6vXR1J
Y7v8xqfppmOS65DqiztzJ2NbSImX3mFfxxPRjtsP5+G5yBVto6+J+vSxwQVBDGCKDhHbNmrr0fm0
z2VooSSX+rUu2MPXAiDtSEj01hqDa/acQ3o/OjS1n+uEt/d+hMCZI6amn1s3730Fwz5dbOlGf5w7
blznWt4f9joWHIWhwa60HNTW3NEmKzJj+jBuIpGXhBmATIPdrqEEa0G0mCVybPBzYQvAy/uO3IIo
DUe42wISbCrSoxt9wnHT8icYxTpgztZ00Swk71KG2yvAqD3UHXMi1ZXp9nVgum0J3mfrZ3/ij5cu
a4r+ajo1OlXhgYoPsUnY9A4u4LOwI5YfrULaIA7gP+UF2/Gz5Q1y81IhPu5tRq7TOeHm7XGH3hFu
zNBylc/65fVEP73XfWgSnugFwHx4UmZMY5HMXbW35H0KW0LY3CWjlacUUPjM2Rbv7WTeJLKJwix4
MK/PT8YIy1GAjSxKbJc1STeii/FWzOzMmIalbSf6E1D5aBiUuccV5bPxWObQ5IFCDmPtG1/9C+Ap
xBFdNHNodFtDnwFp74xh6tYG/yhOiyxnETM83uqQVgGmsFoedOLOst/Nj3VykZeBPBJ87o5IvPBB
R6iPc6ygJOl6KY5y8CthSQ7qKU9GeFlP6FaLiJideiwm7THZyldfiU2FU8f3ND1BH4wxs4Ik5fzq
SCnK7PLFzFa2kkmTHROCOJuJE/GrMYWFbqHzECpE3ypLo+0H5wQb+eUQYl2Fblb8rQXztJ+YGmnY
GyMvo8onlxjR3GibdDs/NSG5fj4stuZNMtzs/I5MvzwzvDGWXpMKqg4VfeRky4c24SdZusBp6nOi
utVgzHdcmgC7/Zvt9ZODaFyNMdhXEcg3n4xx3fgtiI0rE/zBu2LyJrYHL58p2W6p7fXS2vNRx/Qr
UZch47xbbBayVNEF+da718/LhwSLtUkJyUEyaNqkVNH2LCfaMM5FjYdwrayF5w2P1+LQjettWgMP
Hy+azThAHEtqDPuXZTBXP22M/mpqNcIiKqnA34rBzstoBFaGS7JvFimvN7jL0XvYd+TxHDGi210v
gHYZXfL1T2i8QJZDUb1rtjV2M+o9uaCgRm/ObO9eJcNGb2JooD+SfSd6iBXhKYL7iI+ENI1UsHu1
GJF49TfERc+7x7OyuUMdXvbE8YlXjHftlcvr4jwz43w8GHMUjYGyRmhLdrp4Q/M8nu5QL3KZ7Zhx
D+f6ktnyJbAroo42C7v4yddoKQVb4qmnqeg11dKBXq+Y6PP2kgxFfh7RfFF1EgXnRTjYxi1FVyw0
7Lo33uGQvyIX9FCSqXQ7XfkcKmIJSepuqCQfaTi1XWmDU6UVM7erd9cv+25GX/9CZSQdDWY5qq58
3xcd3SzjbT9aoGD35BI//Wy6Yb8hPtq/TIMZ/iYyxnvxpspS8rdsV+4Szu9luFCqS+0wrzY5p0Ly
foMbsAudh8zlkruTL7/8p4vKe/BR39spRKapMvtS1QYXrnqOXMnqiFPMmldvE7VFX7/MbB6cUDdX
V4Beazie16WaCKUcx8hDlJJ9xdJuRFcaQe1GHgvzw6GQPp9kBG0/5ExbozAcvv0Dff0S3hzqSzjS
WqzhcM9SZnk0TViZ33N0LBMDX5iqh6uyWF1AScuWmSsIjH7dRC2KMyVnd7MRkwlvNeFPvR+6niWj
PIc5iPOjpgu0D9GYXlcUJThYsa26eHpzhnkJOxt57HCvYo+vHJki2FVHdmgQ+9NPFJXHIl5/+/vw
rV+lelAbizh68maXEBXlsvzByr2SvSmPRfrzr8SUtpU30v2HKpH5HPD6vROSRvQuI3y6kbGDGR86
etmLI0xorzKSSy7n/vviwtsRL3jdK07BSBIGgLQyw7I1b8I5Nu0IzevuQba2EXbD1x9BFq8OFOJM
SjiCVoLzPS2wNJC8YI9B0GURsw/eLPCBT1u/OSFr8ZCYq/fU4OelP8P6uDNp/u0f07LW+p+fZuq0
lMOZryAFpmcKXXa96q2wZMfwJJ7GdqsMiin29RQyZYtorphpMV0f8Qn8g6rQ03uaOn7aJDmKxtmg
i+OnLOpNfwmQ/BCeLDH7shtSP9JBX2HCHLV0C16IWQnRQVUZXrbCN962Do9A7JkaPvKkcYNjjZb2
K2G7g0zC5sOQCW+puBEVL3NjNLqLCXw5C0RPGqeT9HVfb0S5xbhaZdBN/iT08OlmRtReN40pu46m
0soeJc6sZIjKy9YEz77eiDlVFp/IPpNhsJBD+dVee715NiLYrl8WRbBtOF1q8RPVifMhh2dbdF3V
XWPY1IpIdrtQNqpqrwnoq4ds+yodj3/zGbJiwsR96KSjZY8EWDz8noXDc1P85ielbBYWXjs3BbH1
kFMIuvZGpZN07cYl3qhw3z8xIc+nyEd3o8pKt4IIs1g9d1MmtgKs83dEDp6mF1MWnmvI09UCi6+y
McaXf61lej1j4kZvlozUc9frPnR2TF/3XTe961CHkvsC25/2ntfeQzuHPIMPs/S1Z8w//ehG+UY8
x7XQ6oWOF+Xbr6moLhbep1L8SPnNk8FRF4pp92ru8Ej0gJjBZgy5q8SAYnWb0fG2vhljsL0AgBCe
MPQHzRvzQsbwfR90/s7n03lgM7JxgZl+jpuCa7acynLl7Qlu+grROWgoWp30E3OjVi7ox8AlBHYz
MatAez5Lbd+jeOelzJI9w5DkIrBhpRcq3vz83dePIxy2a+JsxkNBAecBfNS3RpIPq8P5eNT7nx9l
l9PYh2MSZwLUmrSmo3J/Iy6lhwjmJ+h4Ck9bLrCj6ioyPXjMUDSSzNfTQ4LTxsNENQQtWbXyK0XG
TmnIdrDEhBuCXsqZskPf+fsW8hcILoTcfTK3NZOE7oRXjxanS//lC1UxSGSzB1pLOV2v7TaZt42s
I2dcnSl8+cZQBvYJTinBdO3ddyE17vcenMdjyZxTlXat2vQ5rEnFCBl9D4nsI8Xw9ed4w43Bm1I2
vFEmNoSp0kPu+Np75OjsGYRsccCLwWmJjeR1mxLvUFfGXAVWD7cEJqbZZ8Sp8VjlPx5AdMVMu8En
VQ5DxF1ySJfEGx9NtoCZDXdmbV9pyL/9Qw7so8m+vIHXfXZ+w0vWE4IZ2iHeaav2xz/Yb97ru5uS
w/aCo5/fCufnwi8hzpY5sU7jPpw01K3BGe810e+71qBvrZthIz8c5pRalky4niSFRDJmrolwOHB1
MhUVLWzacjVBvNprkiLTak3Xh9XTmw7ZbYbandds501xyPd1Z0IfZgL76cXk0uyt/HiRAehtTC9P
KUG72iuiG7md8NnWY7CxnRGP3Eo0D9HtCb/82C1Siw9h4VowNX1Pp855hO/AEGx4tteRrnul6cbn
IVDRb341WIXD6UOjALbMjphTpKukf8/zBTwh0LFwtv1uVBX1Am7iN3iyzwn/w3cYXQXM5O+H0T+2
m4V8LG8GXfw3v0HiS9r/8qMYr5kQo13L9/Rd5lPCs9r04ezWOds+RCec+LP7oy/s8dU7vm8PMjrr
O4LXq9ZBws8f2bLwZvbi6iaNsn658DleE6yM+ZvTIGpHuD9IjaUgSRE/1mMJ93NEyF6PRa8vyqUE
P/8UEbkqaIHoHR6idPrqv56IP70vX/lEDp8HSVb0HO/BOx5j0s7CJ+l5lZ6U+KZlBH9OARp/8cRi
uiFWbmp8vCzMCxqOlyfTb2sHjafj8Q442b2Zeo4jxN6n5R76d53Q6RpPHj/aGP/p16Q6ed5qebNP
SNnONR61ru+o1+UXqKa7R/zb+uZNN7t9wi15m0wvZbHoMWls5A5uy8zH4oMmqnZv1D3cC13tjXcy
dFMXozKLOCHzLS3G/HNWIVDQjs4+OAmd9u/nL/8o/eCqG9nRtuGoNxoz0kveze2nCeDL4+jYJJhP
1VpcQBA7W/LTa5Fgs1dO52lDxe0l9AYQjzmKJnv89u97N6tNn6KY9Jx52bE2Rin285/eki0jrdFI
PNv/4T0K1WI0hvuljpQNuuJNkV4T2g/aBWX9JaOrx+Nj9LfDPofLW61ZKGwxF31SpTB2xZnYl8o2
hA50+ofv/PxyPcWejuoPPrBtggVvPMenk/I6nCKi48ExuBw/apSepAfbBmOI2C1atGh/yUe29RTb
GJPNMkJfPobH4+lQfOtfhp2wk7HEn3rH+1Y3lVdd7OioTSs+1fdSB+4WT6osuV/Qe6imEEMk0fi4
oqjf64YE0oW0mPa7Rfd85WoNNjocmVnW0PXf+toIhesy6307JFxzzxf4zrPErg+vbirF8YQ+Tq4R
e3Ftk8lpiQtCuyyYJ0ZOMi23kCITxRe6TAfJGKmTWsqwCyV664XBG52zv4Zf/x+F9B3OGUZv5Jhe
QDdwzfmEhukOU+bdiNfngpd+6xmSmu6JcWNDMgSPvYB6rePkxzOFN32tER9Si91e8d4Y3eiZojKb
Uqbbz8YYD7Zm/uZ7ugoi0s3EGGL05Wn4NJAjEjflLd7ooSCTX/8clJtTglCgN3PK3b5bm2cvgova
hORANZm/h+2h/vEZPAu4S/qiXElgBNGCdgoIqD+fribc6Ykw0/BZwj/3awAfcftgmvAIjUYVaYte
tR3T25fXjLfQGUHNUEZUQ+47nuqbO+Q39U2s5JqFjSZrJpBMIlSxCj0cH7v7jMS0qpg3dzWa2ta7
oMPe5sQpd33B37djrZz2L4+i/l7zH79DbMVDhrtmYczsNaWKl0fa1x+RcHZ382VTrdcrdhy3C++b
7ydwzm1Jvjy567OHsIDJn3u68W6SNwfCTGG9my/ferKNL29UgWQCweuAcGNKuJyDlj4+uKkW12L8
8bhQ/rhErSwjGbU8xCDYvcjcz8L3xMZKTn/y/zC/EoP2tHfl8z0vKGyqjE92XpnoN49k5yOE7BD4
sULTBoidbO2QM/mjbwRYvXGnxsAnofTkP3pIyKHh3bENI9gyNyKO76h/+ChoB8mhtHrsCoG3AyAV
gc2M3aSi+bb8rGF+LnRClHcWjtbT8mGuZIPW2fpkjOJ5YaGfvzXwM0u4V9k+uqZ9R6ybrYWTAlOp
mA3SKT97Gl9dnUUOfUJjCugdGVN/Du/oFw8DP7VkVU/3Gj7g3L/z3ux99UeVR0VtiHnGiM8PacxB
5G9CvM37xefV2CygbeITXhTbuVsNh9lVjO6yII4Xtrwb77KPct98E4+8Zo/J0vAGKxESCp80Lab9
bUORFYY2zb79s79u+7t8OPU7OimhnYiy6zxh5ZkhXURRkPB0cQFINo8Sw5fHT3IFe2CrKTzM0lX1
phXSfEjvocPsnTmHvTwn6Z/9F6vMc2M+Hl0K3Ku3X166RatgjFoY1eFE7G//548BdAhJlJCruqBo
7v1NjM7h0seLfvcsxh8P+/JLDEs4ePPydcB/eJx6LYaCxqYaIzXLBUY2lmbwVJ+eCDuH4scXDXES
pBTWt/ZK7NdxgZj6cZ/QS/mduEu4J3VQOhe4e9eKad/84k++CeTwxW1CwvDs8Qd7SsBPpUXcTXX0
poV5pChzrhuy/5Qemnva25DPw5J923DXKOurjeaDqbHj3S87Fj9uFthzeGSuExbhSIfIgrxJjnT8
+s/i8u5qubVuPd34xOuYs3pQ2b6aOXshY5UM2mNjw9ff09tXj2Zpv1kAGy8V2S8lDUnO2w1ka6B3
hitPQd/9ihZerLJwlqyPiXAKBB/oMzng5fd+87k59LJ6tHfsmL+sgovynENFXzndiG0ejgaxXLD6
SiSWvrh8/Z0bwxjgmrntZeuNXRT30D0Kg3krpzHGVN6v0eI03IjV1kPy4/MQC+AS9dznXd/rqqq0
VtKz/VLK+GiGdQlKurwycq9aPttoNcOP70SrtuE/HoG++kGsFeR8Plzmu/KnPjtD50JQahelUVXC
/O5dFCxJLxQIgQNzLu8L56ouBsp9LBdENdW1MWb32EZ32sN3fyYt5uapYeX4sM4/foT6XPvo8FiM
JyxHbVzwaFYXYLnXHYao7cJfP5GXQWp8eVvXUbYkJvzZnzk9w2R0o0uOlE3aMvvLP8ZMdbE8Iawy
LHwWfLyF2gi6m++Jdi5KYy6JY4LQWy5dCnXK650VjX/mFe9I56T1vcPzz35cnt/FcHRIUv+J9zY5
vZKRSM/xx0cof+7yZGp3kotUX0uZWx/GcFxv6xpoLeRUwob3X/zxGT3Y16/X4fjdDwZRrjE5FLbB
uSwN5R8+40caNaj5uZYQjaPBVPu5Dod88emBreISN9M+//JXw0eW1D6IdwkUNHF1stAkuBrD1aPq
ZrYkFkjP2MeT0b359DTPOWotdUmSbmGH7G5ne8V4B0uil4IbzptNL0GTNy0zv/2YO2i04c62DbO0
XZfwNO0xshE5MqtXIs5KyyzhO998+UfTTTNc3kpcWA7B5hWS4XKefXT08ImmG5Oh+hcvnGzfuKFd
5LGh62UoRnTB7JyLyVx2NxWaDivM3izFZG4QWsjOCp2oEn6owQPsPeVv/TMyoDQc1UVh/3gP+eq1
0QtMjZUwa1y2P7zaonfOvgwxoZztN5WGRHNvrWGt0iv58bhJe2zc334cXWzONe/FYLxDe81D7DeZ
bExhuaNggBCQw72SjUl4Z5ZyH/dH9uOhHZbUCHYQpMwr8CIpx+pYKn//TgX857/++ut//U4YlPXj
+fkeDBie0/Dv/z4q8O/4Ef9bEKR/M+nPSQTax+nz73/+6xDC301Xl83wv4f6/az6v//5S5H/HDf4
e6iH+PP/Xv/X99/+81//BwAA//8DALUBqRrjIAAA
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7c0b9868dedafa62-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 01 May 2023 22:52:31 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '196'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 0dc16b9c27373738c2c364d171678493
status:
code: 200
message: OK
- request:
body: '{"input": [[53380, 8245]], "model": "text-embedding-ada-002", "encoding_format":
"base64"}'
headers:
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '90'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/embeddings
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SaW8+6Srfl799PsbJu7TciCjVZdwjI2SoEj51OR0QREDlWAbWzv3sH/zu7u2+e
RCB5lJo15viNWf/xr7/++ruK8+ej//ufv/7+ZF3/9/+YryX3/v73P3/9z3/99ddff/3H7+//9+Sz
jJ9Jkn3T3+O/m9k3eY5///OX8N9X/u9D//z1N7ldGbNR1kfT59Co6Mw2K/aiVIi5YisGtOPUMVc/
L9re624DJJoVM7PigS5k25cPVXN7MDsSyvbbeMOAFt0tZSRIHC4+lqELC7w4EyLkQtT7lXUGOVv4
xP6aWj52jb5AVWAsmZY3H0Sr5pvJ3tn4sEc2Bfm0J7cMHo67IOq27vK+rnMT1MzcMUfRtvqg6h8N
neTdjm3dzTmezPutAtE+a8zpTl0+tXmwQGccXom7e9hxzz/GhBJtFxP7GR/ygdlYle8eUunickmi
US+7DkY/14n9El2PVpWkwUtRLlR+oi8atsc7hr47fYjrGEU+7dKwBOXerPD4dMN4bJMaQ4iMB9mb
m1gf40wI0CpbypSHFxSxaJpMFGYhJkTCu0jAbizA/dgTsn1POGdT4z7lNQ1Vtg9jFQ1ws314R+iM
uTUE8/pkmpwddJPpWvlup6phKZhH+0FcY0v4dE2eMvpe7IxoFwYxvSZnGazE3ODNEK50Zp8WGjpM
tkp2+15DvEpQAXW9jZkG6UYf3ru6QivPD/ellBr5NGy3AuyJR+jXMuS2t95+Awd4Jsw5Xl95Hwbd
E+KbOhDte3t443FRl0jp+RsPxKxaOhYhheubMyqHkhEJweK2kXUVUmYa/a4dJyTL0ETai5DqHSD+
3ZtX2DzXIj6v73c+JVluAnMblxk1nWJaDFYJJ1LdmOYPe1R56duACR0CYl2FQqeNaptw2eYRMQPm
eP3tET6gv9CG+dpRi1dppT7A/cCFHCIvb/ssUUxwk8/AtmdZ4s1d+GwA1+Gb7EdRjrg8ltqmzuQD
wevzHo2raAuKAlHI3FAq4u69rBr0FT4dHnl94PxjdqUUjvsvs7SQ5DxwVxu0DvYbgje3jLPLUung
IUU+cfinQM3xbT/lOKlbtr8GKh+Cbz7B5p0hsideFY+Lz2GA18dzsNBFEh9Ph/1Rvt+vI9ubG+SN
py96wuNSMaa3LEV0HW4GWJ7vBXFegeDxvD4WcAl0g+gtU9E0NFEF922ZM+sqGLpw3SkplKtsR0hr
brzWcqJwabx9YNawafVRJ4tSlkf0wTA9gmgKe/cI3mPPiLVqRL1TFqSAOy0NujwsIZqu1DeQpNgx
c8rUb4fruaGwWBFGl1k3oMnU8xT1eBMTs8F2PqRPHKCPVn6Y+0jktutWzhXOn+LK7Msj493KIRNc
hQdnvtMeeZ/trk+IPm+baFKwzId8lYToGB835IyzT9SwvU7RVf4qxDT6Tz6tjyyA62qpsX3XeS23
nChQxhe2cO+Xb70+HXZH8PNWwZNK3Zhd3UxFn1oumJExveW7zgvRtlXfzPhKYTRsW/sMZ+E90pW1
0bjItxlF5095pfLJtHU+7y/42IwRw1/e42F5OjcgnO4ZbSrrHo/nnf6EQBGfeFiIIR9MbROgJk1q
slOrJh8DfnyApDtXKms+4z0JvALlW+dIPC++tUMtpCkI09Wg/C2/9eElr46gYW+PJ2uRtVO0LFXI
/exKM3v34GyuX1gsjQLTy0WJu82zCmCR7XSCfdXzhn5bYPCmbUpX59eej6iRRDAcGfDiIUhtt0mf
A0Juz9m2GpOIBmWVwZCWDtlnuzQfF/ryjnAgIaY9NIcPr+pWwS29Ueb6XEYsV90BZVp+Yq7mE0S1
10gRvS8LZjfJSp8KczVA0Dp3grvnLZ5ulWfDnRYG0ZWTnHfiDQAVOpTMO6EmZ4m6TNGvHsz1XUYs
pdCgyNVXmN3Duh2wRAtA7aaiIB0/Oj0HYMPwuK+IJpgD4mW9esDyYZjsmLG8HdtLGkKgr2MMpn5t
xzJ8GMgf6hfzkjDxBmEfYpg/01zBaV7fuF0isxovTE9I4E3L1klhlaUhUW1pp493Kroo4EqNX30U
o+5rtnd4UKDEw+2kj62JTEi8RUB52fke1z/1A71Xpkds32/y7iahAlVFGLJ9/Yh03l6jFH56uniS
UJ/S1beCp4QxFazs6fGuWWOE5PsNL16NO/+eKgD11j/xNymZ3n3y8AHlVz0yw8qe+mSVcIU4aVvi
qcY3pqbeZnK08X22FyWid3EvdZB98I3tGrTJ668f2UjvF1ti9QHmfBHVRyBJoeH+cPX1FV4+j+iz
uZ7I+fY+elz/vgB9CqIxXVvsou7LFAzj6RRieLnHaBTYqP72G/Of1qsdy9r1kYmeMZ7ls6XW8drB
oR5vVHqv4pYdO8mWvH14ZqYXdmioE/kK1/fI6DrnUzusrFsg2zFdz/p11YfTQxvkJn3VFJ1Q0/LI
SQPIHleXaIj6fJA/moEGJ3DwtOdZPqDX1lXCJbsQkrNPTvfkkKIa0RcxxaDTh8RzOoDUd9lW6Ew0
ruqNAZ0aJMSmuzHm46BfYdtlAzF0/uX9dHyVUrnFBc7rA8/H5CzfwXkqI/Eno8trIiVXpFaEEGsn
t5wNn4uNziE6EsLTZVzb7bBBK0k8UGm01jnjhZNBv9RqvGg76jVHqTeRVxRnZl6OQzRY33UDy1Z/
4Y15cVv6tc4VqjhpmWc/cT6cl3EFxeqr4+ztb9rxfbgfkbdmGtk367alk5U3EIXXB934vpsPheqU
yHiahBjudM65p8gYVT6EFJDURZ3TNQCU2Wu2N8ftXE/CHbbSUsZrU0lbdqpoJ5dVllPhK4Vxj89+
hrZvp6PA1jbns/+BJlW3xAlqJxqWt7cJOC+WeOEqojct1t4d1Bt7/tH7qVdTgM2VdlSe/dv49WMb
9B62dBLhzbueqU9Y7y2PkfX23Q58cH2lDByF6D0U+a/e0T3fq4wU0rGd0sTTYMdEmTn9K47YLj6b
8mN/PJD7YyeiuloVsLkl0ZauBTvPh1d1qFB2phVT0UuN+HePr+hCM4E5/GPwLhGiFOonVYjxsr7R
oLdvX7m+jxad3v4mn/tHAGa3aDCkeyeiZbGnyLmKXzzaEfM+psI7cEwnZ9aGJPm0XVwCOPjjmu0l
M+TjSjsOKH6LSwxJ9eZjZd59cBfTB1eWlHjTJU7vSno6XJiviwKf0lxfoHOzj+b9zHReX+QKXbbv
CI+hjdEgHx8bqEVVISTJNI9P6qaBfXgAph/2fitW1agpg15+mUPliI+R0RRyAsaT2fmzjH96qFhv
JtBi7dF41LpPhdh74RKzrd9e9+snd7btiJZ9R31Q77ErF6410PuVFjE/ZZ8JnWRrR1SqFdEwaHsT
tEezoAthLFu+aX0fihRsYteRGfVDkcsg5HKEkS1O0RTTboH4JzwSp7++o2nYOiJszG2Fryx1+MwD
qryVFJnsrsc9GhVNlSH+pm8Ky3fV8gPFE5rvM9WvUNRl24sP6pT3mFlqn3f4vQXw72uKFztbjBls
iAiyaG6ouOwrbzpmIvzhGesyMn3qzHqD1q+BMM31ypiPplSg5pAGLBRhy0U7MGf9ubt4LfcuYuHh
0iHZ3wlMpc4zoq8gDSAdhoY46/1bH/PmIsA9JypdJvXD49HDA7RNO4Ml9qLg43HxLiEZhy27D1M7
r8cdg9FvK6I7ytHjr10mgOxVPjvoktUO8/5Avief6JqtRH26GlMA8/rhNM94PK4t20TblBrMmrQb
mkZkC+j9JJyWSFvrU4JIB7MeES0jfTyM4RT+4QkhfrUehRt6gG29C+JH9oD6IqvucBztGp/UoORs
We5UkGvrzFSFL3R68BjIR1H08MB97nWF6abyn/tXb+v14qevULuWRDqeZQl1zDZVGFnL2N5CjHcy
2vigLaIY97pTIOavxxDiznkQLwkVr4eUPtDiMnyIPrYyZ61Fz2iuJ7wUvKBdCUxSYeYb4kSlEFXF
lWK0l+yImZ/8ko/CzZWlLXljTNd0bMf29aKotY4q274n2vawfpSA3oXHduYKe9PJuN3hoqgfsosP
G0TjiyhCPa4bog+OHXNlCO7KOJ0FukFu3fJFtwcYz+s3HqtGj1Z3KtoAKXbxcB+qeHgv0wp++rmQ
F67O0/NVhZkPiHU/i+10U5mIlkqXE3xwJU4TZHUw+x+m7z9DNLyvuyNcveUCS+v92xu87FLCay3u
mf1WJX2qfWeBYtyF5CUPa94eywH/eItyrXznvEfFXdHFISBmLxb8j16t72bPvMTTvb4BPEFcqiEj
lbPXB93Yb2C9DV7E21dKTNXYvMJrv3KIZ3V5PChYEJGTezs8xHanD4/vZoK5n7AQ0Y53wbedUPHd
hcSr4IGmMepT6TB9KmamNxsNK7HT0MwvONtuJH36UmmBzhfjyw6xgtt0M+oLcKWdPvNTihhlWQaf
w3uBefqq2mFd1BiwHy7xr3/R8JG4SPUZ0HhfJdEYxUGn5N5rZFqSaFFHj2QDi7QUCDa91pv5vkCx
my3p9A01T5z7pVRZwYAX21vqMVs5nEEZHiVxQcB8rK+KLOtrssECWbUx3ZHPFcrjKZj7o5rz/LyR
YfX19sStVDUW0iygytspNsw4NF48rWwjBbeMl1h4ZKq+6hoPUBU/dBYFKxuJx0xcQHnXJuIWV8Ob
uuSq/eGR4RrG3rRb2xTabfUkqu4t9dqd7p1sF28Ty/SocPZGlzvk2dn58VPbVOjYId0LFGbNfqqL
+5EqdkVvVJQ2VTw92V4ABy0jLLyPnjfRfBSVU7m4MYtGeTsZ3fEB5LR/sO1Hs7xBSwFAq6uUPuZ+
xW3ldoSzkI905X0lnSc0miDvqhLLkbqIG6HgBXwfokGs+ObF44UmIdD87M1+q+SjVOIMITf0qGiS
qz6GkhiC8bkS4pNUaKnfrw2Y9ZfM68+76HJ5wHUnCmy3aTNv+JrtFe2dV0/FXiwQcx8XAYXIfBCr
eupoBKwN6JneVeIYdInYKlphmPsTc78n5lGWbDY/nmZG0mHU5Y+hgdm/MVP5PvP+Xq18+WC7wIyq
0FvuHdMrcD7tmFWZ6o8PMjk3RZE5XKnayTc+AWgb+8Cs+NZGzVuNFz++xdJoXdpJ1d4DDHrxJVsv
uHtzPWRw7/OECvg+ofGzMky4VOuQWR5r8zrIvgGUvfdk7uxfum1rH2HWbzyd9w4SnuY6AB2kO2nZ
vfFYQqMBdpva+cOn/CtnFeTZ0cErmMAbGssPkb32XdwImo6GsxRSCBThySKzR3ldq7dA5hIizFPq
MZ5A359hcZk+7Od/uvNheYePVnyocH+I8fy+RaRt3MPMRx+PTY32BMsIG+I3Wq1zVtcF/NZHlvyp
ZU8vn2BnZwhPy7PjibW/BVQ+/R3ZSuKprV/UuUJ9NzWirraR1/OPP6BfHrXzvpLHn5KqobBEzpy3
xREdkS2i84YZdPxolj7u9VRUbk2YU7ToSm9wlav9y6+Yx+6NzkC8HaHSrYR23+Sds09ypzAs9JA2
J7PSR8tONXhmpzWun+YuHtTRCeDZ4Y4RxLt8+L3v1fO8+NWP3rBkIwNnxci0tR7zYWdLGFL/ZTPC
Rkfng2gclc+JHX5+PG+QmGVKW3B31vMgHl/JA8OSX01GQInbMamfBvQC9an02R29YTHtO/SrD4YD
0xuwVJZAklLDq8Vmlw8ftDTATVwbr+tqatvK2j/hdnieyH4NOeLSavNQ5u9P22ql8dk/iIrImi2Z
81jeXy7MhmMt1n/8MLfu9hMu99ObSqHx5vyzWp7Rn3obxE00ys7rgcwOGmZMqRJToeAlzPxOl0Zs
xqu9Xglw4+aEJzk6RjRPWhnoUbgztZxaTuNd7KJfPnj5xpjzilaDfJXdEx6er2fLr0G2gPxVG8x4
1HedG68qQ3O+wfZe7utsznfQzL/M3Qq+PnT70gRnJTjMCY0tGossvUK/VGumjo3aTrOeIxdXlz96
9nu/cmryEiN2d/X1S1AKFAuZzozVsEIcRfYTwtfXIW7mqfHgpbUJb7Q/EUtR8pifvXUDki3IVNlS
P5aukpPCz496hXtAvCmSK/z0wi0Giw9OlwEcNgeXWUlJ9EkdIQXxEsWM7D4rb/b7GHnKtSCE3I9o
2rziTPrlu1a4nPO7c9P9yRdMk2w8pmhgAz5/E2aYapvToEwzFBurhM37PRrO1SeVnsampxs1a9Dg
KoEL+en8xYvPq8mHOpmuyp4/RzbnLy3DxtEA7AdLpr+EdcS7ZukjZJQ1Md/COxqtcRTQuiqedPFe
02hkt3UlN3jlUUrBbMtHVahgN7giZv/cesLsH1G7fmKmwqHNR9tICtSk2nb+fnL8vXxvBhikfLKZ
j1H3W785r6Sw6ireA3YH6ee3z8KKt1xcY/PHl2wH00MfJP3bIWNJO2Y+o6XXB8aWQi4qBtlTq/DG
OLRMAH4U8UCdRdxvnccEiQcBe34sk08yIRg+rDTpMn5co6EfREAG36h0uHpvj4XVToT5M9tZm+yX
dzUgJa8VnlhXx9ORdzbMfoGYsPP1OX8Mf3nIH32lP17xtlKIr29V8liirlPwQc3J67B8xMNhuqpw
uj4rPG5iRR82a0NGO219w4PSFfog3oQFyk1BJPsvITHHJyeA2Z/RrlgK8bRITwHsk1bFt9CL+QrS
8oEKp7Lx5bX95tw7VncpfH0ctrWc1BPn+lB+ftDbvLR8/eONHx9aO9lDnX4X1D952dZfpPr4zkUN
YJ/vmKHHhjfUQpWhJcMTMXgeeoMA3QIam27xuhC/7Zw/qvBw7AUzlp9VO/7y9tl/4XSJxfaXl8BH
3JjsNb/vds+pgHJkX8iuoCuPXpPnBhG09Sma+0198z4hfC9uRox4jdH67fsNgKRdiSO6fjwx8ZT9
/B1u2d31KH4rGH58uqVrE/HooS8A27uObDdHXec381JCsUQpXTyEW56el3ED54v5ZWQZV3wYfemI
JlX4ULR/5zFr2baBaeWaDHdPKR77/XGD6p3MmNdOez7MeTpKcrbB1zoqo498NxpIxYdPwuW59qbf
PISJnx1RM1VFYvn0B/C2KGT6ejTi8WHu8M8fUtgcgmhM6rMB67VBmOebDRqQHWvQd5cPXlpq3xY7
LNuArcbA35Nv8U7ajhjCkXz3Y9XkcbNIkwAitvqQbfxxOd3Y/V2+YR5gYdaDPD1f5/V878g+uZN8
Yi92R7Mek33Xta3wke0CurcsU6nqh5gvU9+UdHTz8E1gEZ8MZVsBeZkHvMRBqU/T8zAphb4oiZdf
zXy6uNxHRSgtmeVPaVvN8zY49bt8zm/vfDq69+PPz/+ZH7GZn8EZK5GOikm9eptqG8iMbUGXPH3F
/aotS1iQ24LC4sn59FpyDVxjzBieWuBjJ0su2Jpcs5+/44cuCJSZj8g8r2rpiFRRnvMndpFPX53R
5+oM83yGuYyd417r+gZNqvghhlB+9cnv1AIZq/Wa4PboRvzS3zVY7ssTs1bNWW8gmjKQR+lDLNti
+Z/5BGflyHb8tPyT78MCw5kO3ZLk5W0xBsrn1B+IaomDN5DHOoWX7XyxpIPQsrm/ovG8etPJOJd5
1e2pCdMAnC610yMf7vn+AY9H1ODpkG3mPHIwlHVcwG//6axf5U9l5kc8Zd/Df+UJzlX4Mlcm32h8
UeeOnoS4mC+GyBO3LSpg3LwWtFfGnI9NnrkQ8HRL/PvViadyV9lgriObbKPcigRfl1UQ7aNGdqja
6/1vXvCM7s953lujztSGUDGu154d9knC26/w7v7kkzMPRTMPCiAZ9Zfs/fLtDfHFK9C1rQqizvwy
0qvjgqYd3wTjTxiPhz7O0C2NKW1+vCMTywc/rxX2mz90Uqtryno955XDouEUDXUJ5JVLlItnDw3u
CUQUOcWe4Xl/T2GvnZHqh3/y1qjs1QqUMvAUKvcu8djv+87+gWCHW9GUrlgjK8H5Suf5V8yPu5MI
VmJssKQ4yKPe+3FEqtirDL9CEwnrPTLR1rkbVFk9xXyQdEZRHolXzKWFrQvCPvShacX+N++Ixpf/
1UCBQ8hILi7z7rlTB3mJtwqz1jvajl10EgAHz4j8eORPnrclOaZLt6p5mSf5RgaiH3Gd0G0+zjyA
+mZ1xlKEh3zgjROCWfEL2w9HC63G0axgKFlEIcidfHTIVoS/f6cC/vNff/31v34nDMoqeX7mgwH9
c+z//d9HBf59T+7/FgTx30z8cxKBdvf0+fc//3UI4e+6rcq6/999VTy/3d///CX+OW3wd1/198//
c/lf8//6z3/9HwAAAP//AwBim/ij4SAAAA==
headers:
CF-Cache-Status:
- DYNAMIC
CF-RAY:
- 7c0b986ab8dbfa62-SJC
Connection:
- keep-alive
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Mon, 01 May 2023 22:52:32 GMT
Server:
- cloudflare
access-control-allow-origin:
- '*'
alt-svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
openai-organization:
- user-adtx4fhfg1qsiyzdoaxciooj
openai-processing-ms:
- '195'
openai-version:
- '2020-10-01'
strict-transport-security:
- max-age=15724800; includeSubDomains
x-ratelimit-limit-requests:
- '3000'
x-ratelimit-remaining-requests:
- '2999'
x-ratelimit-reset-requests:
- 20ms
x-request-id:
- 208e3672e7991aa80472beb0310e43d9
status:
code: 200
message: OK
version: 1


@@ -2896,4 +2896,472 @@ interactions:
status:
code: 200
message: OK
- request:
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Write
a concise summary of the following text; include any information that can be
used to answer the question \"What is the price of the book?\". Do not directly
answer the question itself:\n\n\nLITERAL TEXT: \"\"\"Books to Scrape We love
being scraped! Home Books Philosophy Meditations Meditations \u00a325.89 In
stock (1 available) Warning! This is a demo website for web scraping purposes.
Prices and ratings here were randomly assigned and have no real meaning. Product
Description Written in Greek, without any intention of publication, by the only
Roman emperor who was also a philosopher, the Meditations of Marcus Aurelius
(AD 121-180) offer a remarkable series of challenging spiritual reflections
and exercises developed as the emperor struggled to understand himself and make
sense of the universe. Ranging from doubt and despair to conviction and ex Written
in Greek, without any intention of publication, by the only Roman emperor who
was also a philosopher, the Meditations of Marcus Aurelius (AD 121-180) offer
a remarkable series of challenging spiritual reflections and exercises developed
as the emperor struggled to understand himself and make sense of the universe.
Ranging from doubt and despair to conviction and exaltation, they cover such
diverse topics as the nature of moral virtue, human rationality, divine providence,
and Marcus'' own emotions. But while the Meditations were composed to provide
personal consolation and encouragement, in developing his beliefs Marcus Aurelius
also created one of the greatest of all works of philosophy: a timeless collection
of extended meditations and short aphorisms that has been consulted and admired
by statesmen, thinkers and readers through the centuries. ...more Product Information
UPC4f19709e47883df5 Product TypeBooks Price (excl. tax)\u00a325.89 Price (incl.
tax)\u00a325.89 Tax\u00a30.00 Availability In stock (1 available) Number of
reviews 0 Products you recently viewed The Nicomachean Ethics \u00a336.34 In
stock Add to basket Run, Spot, Run: The ... \u00a320.02 In stock Add to basket
Critique of Pure Reason \u00a320.75 In stock Add to basket At The Existentialist
Caf\u00e9: ... \u00a329.93 In stock Add to basket Kierkegaard: A Christian Missionary
... \u00a347.13 In stock Add to basket Proofs of God: Classical ... \u00a354.21
In stock Add to basket\"\"\"\n\n\nCONCISE SUMMARY: The text is best summarized
as"}], "temperature": 0, "max_tokens": 0}'
headers:
AGENT-MODE:
- AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
AGENT-TYPE:
- Auto-GPT-2023-X-TYPE
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '2507'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA1SSTW4bMQyF9z0FofXYsJM4P7NL202RZtEg6KJNEWgkesRaIwqkxo4b+DS9SU9W
aJK46E7gz+N7H/RsyJvWuGCLG3KcXdxcJd/7m29fhuvx83139+uj+3qyfn+xvCofTGO4+4muvG7M
HQ85YiFOpjFO0Bb0pl2eX64Wp5ery1VjBvYYTWv6XGan89WsjNLxbHG6WJrGjGp7NO2zycJDLo+F
N5jUtKvFWWP+aR/ry+WiMYWLjcfS+fLs0BgXmByqab8/mwH1TVY4ommNVSUtNpVqklPBVANY8KhO
KNcTwGsoAaFj3sCDuUVPxdaGPhjo9nBrxY0K16NgpFEb2AVyAUjBbi1F20WENQvkUVywisAJ6oGB
YYedUnlp77ADdWIzpb7OZlbUOdwHhCzk8D8bpPDn98lqfnnVQDeWqfEmtrOSFEqwL+VpWcEmD2IL
pV7BCoLY5HmIe6gE+oR+mgh2i5AYBG2EAW2i1L94mM7yeo2ioJmEymgjCK4jugnGtI9PKI4UFTxu
MXJGXxFVH3c82AQ4ZBSWaTgHiqycA0oDjrcoNXrhTE5BRxfAKgwsNsKWpIzYQBiriEz4baSybyYl
T1tKNSpvyWNyOIdPpUJynJQ8CnrgdETYT79RC+xYNlqrRyv7VwwKHWIC6weStwyUNjX9RBKtr+8S
hMc+8FggkBaW/dwcGrOmRBoeBa1yMq3Rwtk0hpLHJ9MuDj8O7/4CAAD//wMAU2wM4F0DAAA=
headers:
Access-Control-Allow-Origin:
- '*'
Alt-Svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Cache-Control:
- no-cache, must-revalidate
Cf-Cache-Status:
- DYNAMIC
Cf-Ray:
- 7ccfc4f548931119-ORD
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 25 May 2023 18:16:38 GMT
Function-Execution-Id:
- pbmeavwjmg42
Openai-Model:
- gpt-3.5-turbo-0301
Openai-Organization:
- significant-gravitas
Openai-Processing-Ms:
- '13515'
Openai-Version:
- '2020-10-01'
Server:
- Google Frontend
Strict-Transport-Security:
- max-age=15724800; includeSubDomains
Vary:
- Accept-Encoding
X-Cloud-Trace-Context:
- ab314b2e9bb880724cb82a80580d492f;o=1
X-Powered-By:
- Express
X-Ratelimit-Limit-Requests:
- '3500'
X-Ratelimit-Limit-Tokens:
- '90000'
X-Ratelimit-Remaining-Requests:
- '3499'
X-Ratelimit-Remaining-Tokens:
- '88914'
X-Ratelimit-Reset-Requests:
- 17ms
X-Ratelimit-Reset-Tokens:
- 724ms
X-Request-Id:
- fb2737f00b14403a74927018f9a97197
status:
code: 200
message: OK
- request:
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
are browse_website-GPT, an AI designed to use the browse_website command to
visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer
the question ''What is the price of the book?'' and write the price to a file
named \"browse_website.txt\", and use the task_complete command to complete
the task.\nYour decisions must always be made independently without seeking
user assistance. Play to your strengths as an LLM and pursue simple strategies
with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command
to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer
the question ''What is the price of the book?''\n2. Write the price of the book
to a file named \"browse_website.txt\".\n3. Use the task_complete command to
complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1.
~4000 word limit for short term memory. Your short term memory is short, so
immediately save important information to files.\n2. If you are unsure how you
previously did something or want to recall past events, thinking about similar
events will help you remember.\n3. No user assistance\n4. Exclusively use the
commands listed below e.g. command_name\n\nCommands:\n1. append_to_file: Append
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read a file,
args: \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
\"<filename>\", \"text\": \"<text>\"\n6. browse_website: Browse Website, args:
\"url\": \"<url>\", \"question\": \"<what_you_want_to_find_on_website>\"\n7.
delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n8. get_hyperlinks:
Get hyperlinks, args: \"url\": \"<url>\"\n9. get_text_summary: Get text summary,
args: \"url\": \"<url>\", \"question\": \"<question>\"\n10. list_agents: List
GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\":
\"<key>\", \"message\": \"<message>\"\n12. start_agent: Start GPT Agent, args:
\"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n13.
task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
Internet access for searches and information gathering.\n2. Long Term memory
management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
actions to ensure you are performing to the best of your abilities.\n2. Constructively
self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
and strategies to refine your approach.\n4. Every command has a cost, so be
smart and efficient. Aim to complete tasks in the least number of steps.\n5.
Write all code to a file.\n\nYou should only respond in JSON format as described
below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\":
\"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n-
long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\":
\"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\":
\"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n}
\nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
"content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
"system", "content": "This reminds you of these events from your past: \nI was
created"}, {"role": "user", "content": "Determine which next command to use,
and respond using the format specified above:"}, {"role": "assistant", "content":
"{\n \"thoughts\": {\n \"text\": \"I need to use the browse_website
command to visit http://books.toscrape.com/catalogue/meditations_33/index.html
and find the price of the book. I will then write the price to a file named
browse_website.txt. I can then use the task_complete command to complete the
task.\",\n \"reasoning\": \"I need to use the browse_website command
to visit the website and find the price of the book. I will then use the write_to_file
command to write the price to a file named browse_website.txt. Finally, I can
use the task_complete command to complete the task.\",\n \"plan\": \"-
Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html
and find the price of the book\\n- Use the write_to_file command to write the
price to a file named browse_website.txt\\n- Use the task_complete command to
complete the task\",\n \"criticism\": \"I need to ensure that I am using
the correct URL and question for the browse_website command. I also need to
make sure that I am writing the correct information to the file.\",\n \"speak\":
\"I will use the browse_website command to visit the website and find the price
of the book. Then, I will write the price to a file named browse_website.txt
and complete the task.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\":
{\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\":
\"What is the price of the book?\"\n }\n }\n}"}, {"role": "system",
"content": "Command browse_website returned: Answer gathered from website: a
description of the book \"Meditations\" by Marcus Aurelius, which is available
for purchase on a demo website for web scraping purposes. The price of the book
is \u00a325.89, but the website warns that the prices and ratings are randomly
assigned and have no real meaning. The book offers spiritual reflections and
exercises developed by the Roman emperor and philosopher, covering topics such
as moral virtue, human rationality, and divine providence. It is considered
one of the greatest works of philosophy and has been admired by thinkers and
readers throughout history.\n\nLinks: [''Books to Scrape (http://books.toscrape.com/index.html)'',
''Home (http://books.toscrape.com/index.html)'', ''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)'',
''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)'',
'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)'']"},
{"role": "user", "content": "Determine which next command to use, and respond
using the format specified above:"}], "temperature": 0, "max_tokens": 0}'
headers:
AGENT-MODE:
- AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
AGENT-TYPE:
- Auto-GPT-2023-X-TYPE
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '6625'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA7SUS27bMBCG9z3FYNay4bdjbYsujHZVNJtWhUFRY4mxxBHIce3A8Gl6k56soCy/
lCyCFlkJmsfP7x9qdECTYYy6UKKruuzNPy+sfE8n43w5/DKdcTFbLstP86/y/PTxESPk9Im0tB19
zVVdkhi2GKF2pIQyjIezh+lg/DAbDCOsOKMSY8xr6Y37055sXcq9wXgwxAi3XuWE8QFrx1UtK+EN
WY/xcDIbRXgVvyRGk3mEwqLKa+18sDhGqAs2mjzGPw5YkT8LOy4JY1TeGy/KSsBkK2SDhUNiAQAS
lIK3eSE+wRjaYJugvYRggkso1C+CNW9tBlIQ1M5oAl43Lynzpg9LsEQZCMPWUxPfOSO0El6tTUmg
uaqUbQqaxI2OMChoiqyqKIPU8c7TakepN0J92UuQ18qGHnvRF+U3q3ZOd/qX2Lmqn2B068yR8myN
zc/2/hn9ZgRvsvGtIBu1Zv7fR10qe7LQg8f3RU8Sez3j7cQdYO2MGG181R18pTYEfutCpxJYgqoa
YGPzRkuzc6QFjF2zq1TYi9AWUg12c37ovESMh5RCe7uZYOydUmbCg91zd6i+JrU58+1MWb73V9HA
v3bXJ6pjdN7U9sQXixqET8B3hB1fyuXdHT8lQu1V4pWrv9Xp/hr+/B5N+w+LM2wD3HIn9ojHCNfG
Gl+sTjuHMXrhGiM0NqM9xoPjz+OHvwAAAP//AwCkoYsdiAUAAA==
headers:
Access-Control-Allow-Origin:
- '*'
Alt-Svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Cache-Control:
- no-cache, must-revalidate
Cf-Cache-Status:
- DYNAMIC
Cf-Ray:
- 7ccfc55dc9dee1ad-ORD
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 25 May 2023 18:17:12 GMT
Function-Execution-Id:
- pbmegdid7glm
Openai-Model:
- gpt-3.5-turbo-0301
Openai-Organization:
- significant-gravitas
Openai-Processing-Ms:
- '30081'
Openai-Version:
- '2020-10-01'
Server:
- Google Frontend
Strict-Transport-Security:
- max-age=15724800; includeSubDomains
Vary:
- Accept-Encoding
X-Cloud-Trace-Context:
- a847bfe80cefa41d8c90162f62421032;o=1
X-Powered-By:
- Express
X-Ratelimit-Limit-Requests:
- '3500'
X-Ratelimit-Limit-Tokens:
- '90000'
X-Ratelimit-Remaining-Requests:
- '3499'
X-Ratelimit-Remaining-Tokens:
- '85948'
X-Ratelimit-Reset-Requests:
- 17ms
X-Ratelimit-Reset-Tokens:
- 2.701s
X-Request-Id:
- 2138028baa9d1b5c4544e7197979eb65
status:
code: 200
message: OK
- request:
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
are browse_website-GPT, an AI designed to use the browse_website command to
visit http://books.toscrape.com/catalogue/meditations_33/index.html, answer
the question ''What is the price of the book?'' and write the price to a file
named \"browse_website.txt\", and use the task_complete command to complete
the task.\nYour decisions must always be made independently without seeking
user assistance. Play to your strengths as an LLM and pursue simple strategies
with no legal complications.\n\nGOALS:\n\n1. Use the browse_website command
to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer
the question ''What is the price of the book?''\n2. Write the price of the book
to a file named \"browse_website.txt\".\n3. Use the task_complete command to
complete the task.\n4. Do not use any other commands.\n\n\nConstraints:\n1.
~4000 word limit for short term memory. Your short term memory is short, so
immediately save important information to files.\n2. If you are unsure how you
previously did something or want to recall past events, thinking about similar
events will help you remember.\n3. No user assistance\n4. Exclusively use the
commands listed below e.g. command_name\n\nCommands:\n1. append_to_file: Append
to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read a file,
args: \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
\"<filename>\", \"text\": \"<text>\"\n6. browse_website: Browse Website, args:
\"url\": \"<url>\", \"question\": \"<what_you_want_to_find_on_website>\"\n7.
delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n8. get_hyperlinks:
Get hyperlinks, args: \"url\": \"<url>\"\n9. get_text_summary: Get text summary,
args: \"url\": \"<url>\", \"question\": \"<question>\"\n10. list_agents: List
GPT Agents, args: () -> str\n11. message_agent: Message GPT Agent, args: \"key\":
\"<key>\", \"message\": \"<message>\"\n12. start_agent: Start GPT Agent, args:
\"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n13.
task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
Internet access for searches and information gathering.\n2. Long Term memory
management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
actions to ensure you are performing to the best of your abilities.\n2. Constructively
self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
and strategies to refine your approach.\n4. Every command has a cost, so be
smart and efficient. Aim to complete tasks in the least number of steps.\n5.
Write all code to a file.\n\nYou should only respond in JSON format as described
below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\":
\"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n-
long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\":
\"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\":
\"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n}
\nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
"content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
"system", "content": "This reminds you of these events from your past: \nI was
created"}, {"role": "user", "content": "Determine which next command to use,
and respond using the format specified above:"}, {"role": "assistant", "content":
"{\n \"thoughts\": {\n \"text\": \"I need to use the browse_website
command to visit http://books.toscrape.com/catalogue/meditations_33/index.html
and find the price of the book. I will then write the price to a file named
browse_website.txt. I can then use the task_complete command to complete the
task.\",\n \"reasoning\": \"I need to use the browse_website command
to visit the website and find the price of the book. I will then use the write_to_file
command to write the price to a file named browse_website.txt. Finally, I can
use the task_complete command to complete the task.\",\n \"plan\": \"-
Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html
and find the price of the book\\n- Use the write_to_file command to write the
price to a file named browse_website.txt\\n- Use the task_complete command to
complete the task\",\n \"criticism\": \"I need to ensure that I am using
the correct URL and question for the browse_website command. I also need to
make sure that I am writing the correct information to the file.\",\n \"speak\":
\"I will use the browse_website command to visit the website and find the price
of the book. Then, I will write the price to a file named browse_website.txt
and complete the task.\"\n },\n \"command\": {\n \"name\": \"browse_website\",\n \"args\":
{\n \"url\": \"http://books.toscrape.com/catalogue/meditations_33/index.html\",\n \"question\":
\"What is the price of the book?\"\n }\n }\n}"}, {"role": "system",
"content": "Command browse_website returned: Answer gathered from website: a
description of the book \"Meditations\" by Marcus Aurelius, which is available
for purchase on a demo website for web scraping purposes. The price of the book
is \u00a325.89, but the website warns that the prices and ratings are randomly
assigned and have no real meaning. The book offers spiritual reflections and
exercises developed by the Roman emperor and philosopher, covering topics such
as moral virtue, human rationality, and divine providence. It is considered
one of the greatest works of philosophy and has been admired by thinkers and
readers throughout history.\n\nLinks: [''Books to Scrape (http://books.toscrape.com/index.html)'',
''Home (http://books.toscrape.com/index.html)'', ''Books (http://books.toscrape.com/catalogue/category/books_1/index.html)'',
''Philosophy (http://books.toscrape.com/catalogue/category/books/philosophy_7/index.html)'',
'' (http://books.toscrape.com/catalogue/the-nicomachean-ethics_75/index.html)'']"},
{"role": "user", "content": "Determine which next command to use, and respond
using the format specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\":
{\n \"text\": \"I have found the price of the book. I need to use the
write_to_file command to write the price to a file named browse_website.txt.
I can then use the task_complete command to complete the task.\",\n \"reasoning\":
\"I need to use the write_to_file command to write the price of the book to
a file named browse_website.txt. Then, I can use the task_complete command to
complete the task.\",\n \"plan\": \"- Use the write_to_file command to
write the price of the book to a file named browse_website.txt\\n- Use the task_complete
command to complete the task\",\n \"criticism\": \"I need to make sure
that I am writing the correct information to the file and that the file is being
created in the correct directory.\",\n \"speak\": \"I will use the write_to_file
command to write the price of the book to a file named browse_website.txt and
complete the task.\"\n },\n \"command\": {\n \"name\": \"write_to_file\",\n \"args\":
{\n \"filename\": \"browse_website.txt\",\n \"text\":
\"\u00a325.89\"\n }\n }\n}"}, {"role": "system", "content": "Command
write_to_file returned: File written to successfully."}, {"role": "user", "content":
"Determine which next command to use, and respond using the format specified
above:"}], "temperature": 0, "max_tokens": 0}'
headers:
AGENT-MODE:
- AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
AGENT-TYPE:
- Auto-GPT-2023-X-TYPE
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '8003'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA6xSTY/TMBC98ytGc06rlm7bbY5wqrggBEiIoJXrTBMTxxPZE9pVlf+OnKabblmx
EuLoNzPvYzwnNDmmqEslum7sZP1hw1+L4iOXd/lm3byffbKHu+O7qpXttwoT5N1P0jJMTDXXjSUx
7DBB7UkJ5ZjOV/fL2eJ+tdgkWHNOFlMsGpkspsuJtH7Hk9liNscE26AKwvSEjee6kQfhilzAdL5e
zhMcya8LCQqLsiO0ebvqEtQlG00B0+8nrClciD1bwhRVCCaIchJtshNyMcIpcwAAGUrJbVFKyDCF
ARwKdJQIZriFUv0iGDxRDlISiArVFLaglQPHB2gDPeEPl9Y4UyuXgzCEshXI+eD6NlWQk2mGybWk
JxXYGVe8ogu8h71xuXFFjzXeaIpgfOyYK4iaB28kdhiJ8gr2xtL/dtxY5c5mJ/DlH/hu6HS0rE2o
LwtwRP1orSqC0PqooOQvMibAjmLqNlAOmr0nLfaxX8jTaC89to6+Gs8Neft4GzM0pKrXPqXfubG2
X+4LWaENlw970fs0w7Nml1yOc6j8cZtO1XS284zpxrXyxe1ZX9/ZmeBztD6mCa3WFMK+tf0Wxslu
8Ja5DrsE98aZUD6cmTDFINxggsbldMR01v3o3vwGAAD//wMARwk3SV8EAAA=
headers:
Access-Control-Allow-Origin:
- '*'
Alt-Svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Cache-Control:
- no-cache, must-revalidate
Cf-Cache-Status:
- DYNAMIC
Cf-Ray:
- 7ccfc64648bb10cb-ORD
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 25 May 2023 18:17:38 GMT
Function-Execution-Id:
- yvvl2c4sntif
Openai-Model:
- gpt-3.5-turbo-0301
Openai-Organization:
- significant-gravitas
Openai-Processing-Ms:
- '18885'
Openai-Version:
- '2020-10-01'
Server:
- Google Frontend
Strict-Transport-Security:
- max-age=15724800; includeSubDomains
Vary:
- Accept-Encoding
X-Cloud-Trace-Context:
- df845088a79f4f2e92c980eb9a6e2e52
X-Powered-By:
- Express
X-Ratelimit-Limit-Requests:
- '3500'
X-Ratelimit-Limit-Tokens:
- '90000'
X-Ratelimit-Remaining-Requests:
- '3499'
X-Ratelimit-Remaining-Tokens:
- '85933'
X-Ratelimit-Reset-Requests:
- 17ms
X-Ratelimit-Reset-Tokens:
- 2.71s
X-Request-Id:
- d91688703794f0cfd634cd4bd337530d
status:
code: 200
message: OK
version: 1 version: 1
View File
@@ -1350,4 +1350,231 @@ interactions:
status: status:
code: 200 code: 200
message: OK message: OK
- request:
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
are write_to_file-GPT, an AI designed to use the write_to_file command to write
''Hello World'' into a file named \"hello_world.txt\" and then use the task_complete
command to complete the task.\nYour decisions must always be made independently
without seeking user assistance. Play to your strengths as an LLM and pursue
simple strategies with no legal complications.\n\nGOALS:\n\n1. Use the write_to_file
command to write ''Hello World'' into a file named \"hello_world.txt\".\n2.
Use the task_complete command to complete the task.\n3. Do not use any other
commands.\n\n\nConstraints:\n1. ~4000 word limit for short term memory. Your
short term memory is short, so immediately save important information to files.\n2.
If you are unsure how you previously did something or want to recall past events,
thinking about similar events will help you remember.\n3. No user assistance\n4.
Exclusively use the commands listed below e.g. command_name\n\nCommands:\n1.
append_to_file: Append to file, args: \"filename\": \"<filename>\", \"text\":
\"<text>\"\n2. delete_file: Delete file, args: \"filename\": \"<filename>\"\n3.
list_files: List Files in Directory, args: \"directory\": \"<directory>\"\n4.
read_file: Read a file, args: \"filename\": \"<filename>\"\n5. write_to_file:
Write to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n6.
delete_agent: Delete GPT Agent, args: \"key\": \"<key>\"\n7. get_hyperlinks:
Get hyperlinks, args: \"url\": \"<url>\"\n8. get_text_summary: Get text summary,
args: \"url\": \"<url>\", \"question\": \"<question>\"\n9. list_agents: List
GPT Agents, args: () -> str\n10. message_agent: Message GPT Agent, args: \"key\":
\"<key>\", \"message\": \"<message>\"\n11. start_agent: Start GPT Agent, args:
\"name\": \"<name>\", \"task\": \"<short_task_desc>\", \"prompt\": \"<prompt>\"\n12.
task_complete: Task Complete (Shutdown), args: \"reason\": \"<reason>\"\n\nResources:\n1.
Internet access for searches and information gathering.\n2. Long Term memory
management.\n3. GPT-3.5 powered Agents for delegation of simple tasks.\n4. File
output.\n\nPerformance Evaluation:\n1. Continuously review and analyze your
actions to ensure you are performing to the best of your abilities.\n2. Constructively
self-criticize your big-picture behavior constantly.\n3. Reflect on past decisions
and strategies to refine your approach.\n4. Every command has a cost, so be
smart and efficient. Aim to complete tasks in the least number of steps.\n5.
Write all code to a file.\n\nYou should only respond in JSON format as described
below \nResponse Format: \n{\n \"thoughts\": {\n \"text\": \"thought\",\n \"reasoning\":
\"reasoning\",\n \"plan\": \"- short bulleted\\n- list that conveys\\n-
long-term plan\",\n \"criticism\": \"constructive self-criticism\",\n \"speak\":
\"thoughts summary to say to user\"\n },\n \"command\": {\n \"name\":
\"command name\",\n \"args\": {\n \"arg name\": \"value\"\n }\n }\n}
\nEnsure the response can be parsed by Python json.loads"}, {"role": "system",
"content": "The current time and date is Tue Jan 1 00:00:00 2000"}, {"role":
"system", "content": "This reminds you of these events from your past: \nI was
created"}, {"role": "user", "content": "Determine which next command to use,
and respond using the format specified above:"}, {"role": "assistant", "content":
"{\n \"thoughts\": {\n \"text\": \"I need to use the write_to_file
command to write ''Hello World'' into a file named ''hello_world.txt''.\",\n \"reasoning\":
\"The task requires me to write ''Hello World'' into a file named ''hello_world.txt'',
and the write_to_file command is the appropriate command for this task.\",\n \"plan\":
\"- Use the write_to_file command to write ''Hello World'' into a file named
''hello_world.txt''.\",\n \"criticism\": \"I need to ensure that I use
the correct filename and text to write to the file.\",\n \"speak\": \"I
will use the write_to_file command to write ''Hello World'' into a file named
''hello_world.txt''.\"\n },\n \"command\": {\n \"name\": \"write_to_file\",\n \"args\":
{\n \"filename\": \"hello_world.txt\",\n \"text\": \"Hello
World\"\n }\n }\n}"}, {"role": "system", "content": "Command write_to_file
returned: File written to successfully."}, {"role": "user", "content": "Determine
which next command to use, and respond using the format specified above:"}],
"temperature": 0, "max_tokens": 0}'
headers:
AGENT-MODE:
- AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
AGENT-TYPE:
- Auto-GPT-2023-X-TYPE
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '4608'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA7RSy27bQAy89ysIXnyRDTuGnVhfUCOHFoiLBqgKY7Oipa1XXHWXip0a+vdCLyhN
XwjQ3hYccjjDnQuaFGPUuRJdlHZ6fbtxUu5MOHx72vo7qdanbHl9f3/1jm/fY4Tu4Qtp6Sdm2hWl
JTGOMULtSQmlGC/WN6v58ma9XkZYuJQsxpiVMl3OVlOp/IObzpfzBUZYBZURxhcsvStK2Ys7EgeM
F/PFJsKRfAQ26wjFibJj6WqxqiPUuTOaAsafLlhQGIi9s4QxqhBMEMXSyHQsxI2FS8IAAAlK7qos
l5BgDH2xB+gsTTHBLeTqkSBUWlMIh8raJzh5I0IMk7dkrYOPztt0AobFgYKDsQSsCkphkjf4/tTg
MznLZAa7nIDpLKBdUShOQRxUgcAEEBWO+947NfXxnVOLzhKMnqv0pIJjw1kndde3gaevlfEUoGh5
Grn0erHQysuJfyEE7gxrgj8dR1zb3myI2tc/s11axZ3jKXwIY9/I8mzLX9m0N2K0CcXw30zUThKH
yjdzSmALpXePJiVQ0F0dDs4P5Iaz39KHktRxoD4Za1vfr5bcUdbRkNy+/afgNn/ZbfuB/oUo5bOX
mX+eqP+R/MFCa6N3k3CNdYQHwybk+243xhjElRih4ZTOGM/rz/Wb7wAAAP//AwDKh13FrgQAAA==
headers:
Access-Control-Allow-Origin:
- '*'
Alt-Svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Cache-Control:
- no-cache, must-revalidate
Cf-Cache-Status:
- DYNAMIC
Cf-Ray:
- 7ccfc6e12bd2e1ef-ORD
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 25 May 2023 18:18:06 GMT
Function-Execution-Id:
- yvvlex8job2h
Openai-Model:
- gpt-3.5-turbo-0301
Openai-Organization:
- significant-gravitas
Openai-Processing-Ms:
- '22621'
Openai-Version:
- '2020-10-01'
Server:
- Google Frontend
Strict-Transport-Security:
- max-age=15724800; includeSubDomains
Vary:
- Accept-Encoding
X-Cloud-Trace-Context:
- 827e911ff253a9d12783cb62e669dc20;o=1
X-Powered-By:
- Express
X-Ratelimit-Limit-Requests:
- '3500'
X-Ratelimit-Limit-Tokens:
- '90000'
X-Ratelimit-Remaining-Requests:
- '3499'
X-Ratelimit-Remaining-Tokens:
- '86005'
X-Ratelimit-Reset-Requests:
- 17ms
X-Ratelimit-Reset-Tokens:
- 2.663s
X-Request-Id:
- 4d357db894485be6e259654a899f2e57
status:
code: 200
message: OK
- request:
body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Write
a concise summary of the following text:\n\n\nLITERAL TEXT: \"\"\"Hello World\"\"\"\n\n\nCONCISE
SUMMARY: The text is best summarized as"}], "temperature": 0, "max_tokens":
0}'
headers:
AGENT-MODE:
- AAAAAAAAAAAAAAAAAAAAAMLheAAaAAAA0%2BuSeid%2BULvsea4JtiGRiSDSJSI%3DEUifiRBkKG5E2XzMDjRfl76ZC9Ub0wnz4XsNiRVBChTYbJcE3F
AGENT-TYPE:
- Auto-GPT-2023-X-TYPE
Accept:
- '*/*'
Accept-Encoding:
- gzip, deflate
Connection:
- keep-alive
Content-Length:
- '252'
Content-Type:
- application/json
method: POST
uri: https://api.openai.com/v1/chat/completions
response:
body:
string: !!binary |
H4sIAAAAAAAAA0SOQUvDQBQG7/6K8J03JSVpmu7RU7GIXooHK2WbvCarm33L7qsoIf9dClqvAzPM
BNtBox2MtGNw+Xq3CQ9DXFfPj09Oim5Xu+3+u9oO+/tPhgKf3qmVX2PR8hgciWUPhTaSEeqgl3Wz
KsqmbjYKI3fkoNEHycvFKpdLPHFelMUSCpdkeoKeECKPQY7CH+QTdFkr/KdvuFEQFuNuoKpmhXZg
21KCfp0wUvpLRnYEDZOSTWK8XAfZC/nrvMmSveazPhKJ9X3G5+yALTnH2QtH1y0OwKxwtt6m4RjJ
JPbQSMIBCtZ39AVdzG/z3Q8AAAD//wMAv00f4UIBAAA=
headers:
Access-Control-Allow-Origin:
- '*'
Alt-Svc:
- h3=":443"; ma=86400, h3-29=":443"; ma=86400
- h3=":443"; ma=2592000,h3-29=":443"; ma=2592000
Cache-Control:
- no-cache, must-revalidate
Cf-Cache-Status:
- DYNAMIC
Cf-Ray:
- 7ccfc7839fad10e3-ORD
Content-Encoding:
- gzip
Content-Type:
- application/json
Date:
- Thu, 25 May 2023 18:18:11 GMT
Function-Execution-Id:
- pbme9het6dvt
Openai-Model:
- gpt-3.5-turbo-0301
Openai-Organization:
- significant-gravitas
Openai-Processing-Ms:
- '1227'
Openai-Version:
- '2020-10-01'
Server:
- Google Frontend
Strict-Transport-Security:
- max-age=15724800; includeSubDomains
Vary:
- Accept-Encoding
X-Cloud-Trace-Context:
- 4a9b8edee07c9658c887ffc760c711e6;o=1
X-Powered-By:
- Express
X-Ratelimit-Limit-Requests:
- '3500'
X-Ratelimit-Limit-Tokens:
- '90000'
X-Ratelimit-Remaining-Requests:
- '3499'
X-Ratelimit-Remaining-Tokens:
- '89466'
X-Ratelimit-Reset-Requests:
- 17ms
X-Ratelimit-Reset-Tokens:
- 356ms
X-Request-Id:
- fb5593e05606c34e2e9461ff82b28747
status:
code: 200
message: OK
version: 1 version: 1
View File
@@ -1,8 +1,6 @@
import pytest import pytest
from pytest_mock import MockerFixture
from autogpt.agent import Agent from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file
from tests.integration.challenges.utils import run_interaction_loop from tests.integration.challenges.utils import run_interaction_loop
from tests.utils import requires_api_key from tests.utils import requires_api_key
@@ -13,11 +11,11 @@ CYCLE_COUNT = 2
@pytest.mark.vcr @pytest.mark.vcr
def test_browse_website( def test_browse_website(
browser_agent: Agent, browser_agent: Agent,
patched_api_requestor: MockerFixture, patched_api_requestor: None,
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
) -> None: ) -> None:
file_path = browser_agent.workspace.get_path("browse_website.txt") file_path = browser_agent.workspace.get_path("browse_website.txt")
run_interaction_loop(monkeypatch, browser_agent, CYCLE_COUNT) run_interaction_loop(monkeypatch, browser_agent, CYCLE_COUNT)
content = read_file(file_path) content = open(file_path, encoding="utf-8").read()
assert "£25.89" in content, f"Expected £25.89, got {content}" assert "£25.89" in content, f"Expected £25.89, got {content}"
View File
@@ -1,5 +1,4 @@
import pytest import pytest
from pytest_mock import MockerFixture
from autogpt.agent import Agent from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file from autogpt.commands.file_operations import read_file
@@ -13,7 +12,7 @@ CYCLE_COUNT = 3
@pytest.mark.vcr @pytest.mark.vcr
def test_write_file( def test_write_file(
writer_agent: Agent, writer_agent: Agent,
patched_api_requestor: MockerFixture, patched_api_requestor: None,
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
) -> None: ) -> None:
file_path = str(writer_agent.workspace.get_path("hello_world.txt")) file_path = str(writer_agent.workspace.get_path("hello_world.txt"))

View File
import pytest import pytest
from pytest_mock import MockerFixture
from autogpt.commands.file_operations import read_file, write_to_file from autogpt.commands.file_operations import read_file, write_to_file
from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times from tests.integration.challenges.utils import run_interaction_loop, run_multiple_times
@@ -16,7 +15,7 @@ from autogpt.agent import Agent
def test_information_retrieval_challenge_a( def test_information_retrieval_challenge_a(
get_company_revenue_agent: Agent, get_company_revenue_agent: Agent,
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
patched_api_requestor: MockerFixture, patched_api_requestor: None,
) -> None: ) -> None:
""" """
Test the challenge_a function in a given agent by mocking user inputs and checking the output file content. Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.

View File
import pytest import pytest
from pytest_mock import MockerFixture
from autogpt.agent import Agent from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file, write_to_file from autogpt.commands.file_operations import read_file, write_to_file
@@ -15,7 +14,7 @@ MAX_LEVEL = 3
def test_memory_challenge_a( def test_memory_challenge_a(
memory_management_agent: Agent, memory_management_agent: Agent,
user_selected_level: int, user_selected_level: int,
patched_api_requestor: MockerFixture, patched_api_requestor: None,
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
) -> None: ) -> None:
""" """
View File
@@ -1,5 +1,4 @@
import pytest import pytest
from pytest_mock import MockerFixture
from autogpt.agent import Agent from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file, write_to_file from autogpt.commands.file_operations import read_file, write_to_file
@@ -20,7 +19,7 @@ NOISE = 1000
def test_memory_challenge_b( def test_memory_challenge_b(
memory_management_agent: Agent, memory_management_agent: Agent,
user_selected_level: int, user_selected_level: int,
patched_api_requestor: MockerFixture, patched_api_requestor: None,
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
) -> None: ) -> None:
""" """
View File
@@ -1,6 +1,4 @@
import pytest import pytest
from _pytest.monkeypatch import MonkeyPatch
from pytest_mock import MockerFixture
from autogpt.agent import Agent from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file, write_to_file from autogpt.commands.file_operations import read_file, write_to_file
@@ -21,7 +19,7 @@ NOISE = 1000
def test_memory_challenge_c( def test_memory_challenge_c(
memory_management_agent: Agent, memory_management_agent: Agent,
user_selected_level: int, user_selected_level: int,
patched_api_requestor: MockerFixture, patched_api_requestor: None,
monkeypatch: pytest.MonkeyPatch, monkeypatch: pytest.MonkeyPatch,
) -> None: ) -> None:
""" """
View File
@@ -1,7 +1,8 @@
import os import os
import openai import openai.api_requestor
import pytest import pytest
from pytest_mock import MockerFixture
from tests.conftest import PROXY from tests.conftest import PROXY
from tests.vcr.vcr_filter import before_record_request, before_record_response from tests.vcr.vcr_filter import before_record_request, before_record_response
@@ -30,7 +31,7 @@ def patch_api_base(requestor):
@pytest.fixture @pytest.fixture
def patched_api_requestor(mocker): def patched_api_requestor(mocker: MockerFixture):
original_init = openai.api_requestor.APIRequestor.__init__ original_init = openai.api_requestor.APIRequestor.__init__
original_validate_headers = openai.api_requestor.APIRequestor._validate_headers original_validate_headers = openai.api_requestor.APIRequestor._validate_headers
@@ -51,5 +52,3 @@ def patched_api_requestor(mocker):
"_validate_headers", "_validate_headers",
new=patched_validate_headers, new=patched_validate_headers,
) )
return mocker
View File
View File
@@ -0,0 +1,18 @@
import numpy
import pytest
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding
@pytest.fixture
def memory_item(mock_embedding: Embedding):
return MemoryItem(
raw_content="test content",
summary="test content summary",
chunks=["test content"],
chunk_summaries=["test content summary"],
e_summary=mock_embedding,
e_chunks=[mock_embedding],
metadata={},
)
View File
@@ -0,0 +1,106 @@
# sourcery skip: snake-case-functions
"""Tests for JSONFileMemory class"""
import orjson
import pytest
from autogpt.config import Config
from autogpt.memory.vector import JSONFileMemory, MemoryItem
from autogpt.workspace import Workspace
from tests.utils import requires_api_key
@pytest.fixture(autouse=True)
def cleanup_sut_singleton():
if JSONFileMemory in JSONFileMemory._instances:
del JSONFileMemory._instances[JSONFileMemory]
def test_json_memory_init_without_backing_file(config: Config, workspace: Workspace):
index_file = workspace.root / f"{config.memory_index}.json"
assert not index_file.exists()
JSONFileMemory(config)
assert index_file.exists()
assert index_file.read_text() == "[]"
def test_json_memory_init_with_backing_empty_file(config: Config, workspace: Workspace):
index_file = workspace.root / f"{config.memory_index}.json"
index_file.touch()
assert index_file.exists()
JSONFileMemory(config)
assert index_file.exists()
assert index_file.read_text() == "[]"
def test_json_memory_init_with_backing_file(config: Config, workspace: Workspace):
index_file = workspace.root / f"{config.memory_index}.json"
index_file.touch()
raw_data = {"texts": ["test"]}
data = orjson.dumps(raw_data, option=JSONFileMemory.SAVE_OPTIONS)
with index_file.open("wb") as f:
f.write(data)
assert index_file.exists()
JSONFileMemory(config)
assert index_file.exists()
assert index_file.read_text() == "[]"
def test_json_memory_add(config: Config, memory_item: MemoryItem):
index = JSONFileMemory(config)
index.add(memory_item)
assert index.memories[0] == memory_item
def test_json_memory_clear(config: Config, memory_item: MemoryItem):
index = JSONFileMemory(config)
assert index.memories == []
index.add(memory_item)
assert index.memories[0] == memory_item, "Cannot test clear() because add() fails"
index.clear()
assert index.memories == []
def test_json_memory_get(config: Config, memory_item: MemoryItem, mock_get_embedding):
index = JSONFileMemory(config)
assert (
index.get("test") == None
), "Cannot test get() because initial index is not empty"
index.add(memory_item)
retrieved = index.get("test")
assert retrieved is not None
assert retrieved.memory_item == memory_item
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_json_memory_get_relevant(config: Config, patched_api_requestor: None) -> None:
index = JSONFileMemory(config)
mem1 = MemoryItem.from_text_file("Sample text", "sample.txt")
mem2 = MemoryItem.from_text_file("Grocery list:\n- Pancake mix", "groceries.txt")
mem3 = MemoryItem.from_text_file("What is your favorite color?", "color.txt")
lipsum = "Lorem ipsum dolor sit amet"
mem4 = MemoryItem.from_text_file(" ".join([lipsum] * 100), "lipsum.txt")
index.add(mem1)
index.add(mem2)
index.add(mem3)
index.add(mem4)
assert index.get_relevant(mem1.raw_content, 1)[0].memory_item == mem1
assert index.get_relevant(mem2.raw_content, 1)[0].memory_item == mem2
assert index.get_relevant(mem3.raw_content, 1)[0].memory_item == mem3
assert [mr.memory_item for mr in index.get_relevant(lipsum, 2)] == [mem4, mem1]
def test_json_memory_get_stats(config: Config, memory_item: MemoryItem) -> None:
index = JSONFileMemory(config)
index.add(memory_item)
n_memories, n_chunks = index.get_stats()
assert n_memories == 1
assert n_chunks == 1
View File
@@ -0,0 +1,44 @@
import numpy
import pytest
from pytest_mock import MockerFixture
import autogpt.memory.vector.memory_item as vector_memory_item
import autogpt.memory.vector.providers.base as memory_provider_base
from autogpt.config.config import Config
from autogpt.llm.providers.openai import OPEN_AI_EMBEDDING_MODELS
from autogpt.memory.vector import get_memory
from autogpt.memory.vector.utils import Embedding
@pytest.fixture
def embedding_dimension(config: Config):
return OPEN_AI_EMBEDDING_MODELS[config.embedding_model].embedding_dimensions
@pytest.fixture
def mock_embedding(embedding_dimension: int) -> Embedding:
return numpy.full((1, embedding_dimension), 0.0255, numpy.float32)[0]
@pytest.fixture
def mock_get_embedding(mocker: MockerFixture, embedding_dimension: int):
mocker.patch.object(
vector_memory_item,
"get_embedding",
return_value=[0.0255] * embedding_dimension,
)
mocker.patch.object(
memory_provider_base,
"get_embedding",
return_value=[0.0255] * embedding_dimension,
)
@pytest.fixture
def memory_none(agent_test_config: Config, mock_get_embedding):
was_memory_backend = agent_test_config.memory_backend
agent_test_config.set_memory_backend("no_memory")
yield get_memory(agent_test_config)
agent_test_config.set_memory_backend(was_memory_backend)
View File
@@ -1,50 +0,0 @@
import random
import string
import sys
import unittest
from pathlib import Path
from autogpt.config import Config
from autogpt.memory.local import LocalCache
class TestLocalCache(unittest.TestCase):
def generate_random_string(self, length):
return "".join(random.choice(string.ascii_letters) for _ in range(length))
def setUp(self):
"""Set up the test environment for the LocalCache tests."""
cfg = cfg = Config()
self.cache = LocalCache(cfg)
self.cache.clear()
# Add example texts to the cache
self.example_texts = [
"The quick brown fox jumps over the lazy dog",
"I love machine learning and natural language processing",
"The cake is a lie, but the pie is always true",
"ChatGPT is an advanced AI model for conversation",
]
for text in self.example_texts:
self.cache.add(text)
# Add some random strings to test noise
for _ in range(5):
self.cache.add(self.generate_random_string(10))
def test_get_relevant(self):
"""Test getting relevant texts from the cache."""
query = "I'm interested in artificial intelligence and NLP"
k = 3
relevant_texts = self.cache.get_relevant(query, k)
print(f"Top {k} relevant texts for the query '{query}':")
for i, text in enumerate(relevant_texts, start=1):
print(f"{i}. {text}")
self.assertEqual(len(relevant_texts), k)
self.assertIn(self.example_texts[1], relevant_texts)
if __name__ == "__main__":
unittest.main()
View File
@@ -1,55 +0,0 @@
# sourcery skip: snake-case-functions
"""Tests for the MilvusMemory class."""
import random
import string
import unittest
from autogpt.config import Config
from autogpt.memory.milvus import MilvusMemory
try:
class TestMilvusMemory(unittest.TestCase):
"""Unit tests for the MilvusMemory class."""
def generate_random_string(self, length: int) -> str:
return "".join(random.choice(string.ascii_letters) for _ in range(length))
def setUp(self) -> None:
cfg = Config()
cfg.milvus_addr = "localhost:19530"
self.memory = MilvusMemory(cfg)
self.memory.clear()
# Add example texts to the cache
self.example_texts = [
"The quick brown fox jumps over the lazy dog",
"I love machine learning and natural language processing",
"The cake is a lie, but the pie is always true",
"ChatGPT is an advanced AI model for conversation",
]
for text in self.example_texts:
self.memory.add(text)
# Add some random strings to test noise
for _ in range(5):
self.memory.add(self.generate_random_string(10))
def test_get_relevant(self) -> None:
"""Test getting relevant texts from the cache."""
query = "I'm interested in artificial intelligence and NLP"
num_relevant = 3
relevant_texts = self.memory.get_relevant(query, num_relevant)
print(f"Top {k} relevant texts for the query '{query}':")
for i, text in enumerate(relevant_texts, start=1):
print(f"{i}. {text}")
self.assertEqual(len(relevant_texts), k)
self.assertIn(self.example_texts[1], relevant_texts)
except:
print(
"Skipping tests/integration/milvus_memory_tests.py as Milvus is not installed."
)
View File
@@ -1,65 +0,0 @@
import string
from unittest.mock import MagicMock
import pytest
from numpy.random import RandomState
from pytest_mock import MockerFixture
from autogpt.config import Config
from autogpt.llm import llm_utils
from autogpt.llm.api_manager import ApiManager
from autogpt.llm.modelsinfo import COSTS
from tests.utils import requires_api_key
@pytest.fixture(scope="session")
def random_large_string():
"""Big string used to overwhelm token limits."""
seed = 42
n_characters = 30_000
random = RandomState(seed)
return "".join(random.choice(list(string.ascii_lowercase), size=n_characters))
@pytest.fixture()
def api_manager(mocker: MockerFixture):
api_manager = ApiManager()
mocker.patch.multiple(
api_manager,
total_prompt_tokens=0,
total_completion_tokens=0,
total_cost=0,
)
yield api_manager
@pytest.fixture()
def spy_create_embedding(mocker: MockerFixture):
return mocker.spy(llm_utils, "create_embedding")
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_get_ada_embedding(
config: Config,
api_manager: ApiManager,
spy_create_embedding: MagicMock,
patched_api_requestor,
):
token_cost = COSTS[config.embedding_model]["prompt"]
llm_utils.get_ada_embedding("test")
spy_create_embedding.assert_called_once_with("test", model=config.embedding_model)
assert (prompt_tokens := api_manager.get_total_prompt_tokens()) == 1
assert api_manager.get_total_completion_tokens() == 0
assert api_manager.get_total_cost() == (prompt_tokens * token_cost) / 1000
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_get_ada_embedding_large_context(random_large_string):
# This test should be able to mock the openai call after we have a fix. We don't need
# to hit the API to test the logic of the function (so not using vcr). This is a quick
# regression test to document the issue.
llm_utils.get_ada_embedding(random_large_string)
View File
@@ -1,110 +0,0 @@
# sourcery skip: snake-case-functions
"""Tests for LocalCache class"""
import unittest
import orjson
import pytest
from autogpt.memory.local import EMBED_DIM, SAVE_OPTIONS
from autogpt.memory.local import LocalCache as LocalCache_
from tests.utils import requires_api_key
@pytest.fixture
def LocalCache():
# Hack, real gross. Singletons are not good times.
if LocalCache_ in LocalCache_._instances:
del LocalCache_._instances[LocalCache_]
return LocalCache_
@pytest.fixture
def mock_embed_with_ada(mocker):
mocker.patch(
"autogpt.memory.local.get_ada_embedding",
return_value=[0.1] * EMBED_DIM,
)
def test_init_without_backing_file(LocalCache, config, workspace):
cache_file = workspace.root / f"{config.memory_index}.json"
assert not cache_file.exists()
LocalCache(config)
assert cache_file.exists()
assert cache_file.read_text() == "{}"
def test_init_with_backing_empty_file(LocalCache, config, workspace):
cache_file = workspace.root / f"{config.memory_index}.json"
cache_file.touch()
assert cache_file.exists()
LocalCache(config)
assert cache_file.exists()
assert cache_file.read_text() == "{}"
def test_init_with_backing_file(LocalCache, config, workspace):
cache_file = workspace.root / f"{config.memory_index}.json"
cache_file.touch()
raw_data = {"texts": ["test"]}
data = orjson.dumps(raw_data, option=SAVE_OPTIONS)
with cache_file.open("wb") as f:
f.write(data)
assert cache_file.exists()
LocalCache(config)
assert cache_file.exists()
assert cache_file.read_text() == "{}"
def test_add(LocalCache, config, mock_embed_with_ada):
cache = LocalCache(config)
cache.add("test")
assert cache.data.texts == ["test"]
assert cache.data.embeddings.shape == (1, EMBED_DIM)
def test_clear(LocalCache, config, mock_embed_with_ada):
cache = LocalCache(config)
assert cache.data.texts == []
assert cache.data.embeddings.shape == (0, EMBED_DIM)
cache.add("test")
assert cache.data.texts == ["test"]
assert cache.data.embeddings.shape == (1, EMBED_DIM)
cache.clear()
assert cache.data.texts == []
assert cache.data.embeddings.shape == (0, EMBED_DIM)
def test_get(LocalCache, config, mock_embed_with_ada):
cache = LocalCache(config)
assert cache.get("test") == []
cache.add("test")
assert cache.get("test") == ["test"]
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
def test_get_relevant(LocalCache, config, patched_api_requestor) -> None:
cache = LocalCache(config)
text1 = "Sample text 1"
text2 = "Sample text 2"
cache.add(text1)
cache.add(text2)
result = cache.get_relevant(text1, 1)
assert result == [text1]
def test_get_stats(LocalCache, config, mock_embed_with_ada) -> None:
cache = LocalCache(config)
text = "Sample text"
cache.add(text)
stats = cache.get_stats()
assert stats == (1, cache.data.embeddings.shape)
View File
@@ -1,67 +0,0 @@
import json
import pytest
from autogpt.config import Config
from autogpt.memory import get_memory
from autogpt.memory_management.store_memory import (
save_memory_trimmed_from_context_window,
)
from tests.utils import requires_api_key
@pytest.fixture
def message_history_fixture():
assistant_reply = {
"thoughts": {
"text": "thoughts",
"reasoning": "reasoning",
"plan": "plan",
"criticism": "criticism",
"speak": "speak",
},
"command": {"name": "google", "args": {"query": "google_query"}},
}
return [
{"content": json.dumps(assistant_reply, indent=4)},
{"content": "Command Result: Important Information."},
]
@pytest.fixture
def expected_permanent_memory() -> str:
return """Assistant Reply: {
"thoughts": {
"text": "thoughts",
"reasoning": "reasoning",
"plan": "plan",
"criticism": "criticism",
"speak": "speak"
},
"command": {
"name": "google",
"args": {
"query": "google_query"
}
}
}
Result: None
Human Feedback:Command Result: Important Information."""
@requires_api_key("OPENAI_API_KEY")
@pytest.mark.vcr
def test_save_memory_trimmed_from_context_window(
message_history_fixture,
expected_permanent_memory,
config: Config,
patched_api_requestor,
):
next_message_to_add_index = len(message_history_fixture) - 1
memory = get_memory(config, init=True)
save_memory_trimmed_from_context_window(
message_history_fixture, next_message_to_add_index, memory
)
memory_found = memory.get_relevant("Important Information", 5)
assert memory_found[0] == expected_permanent_memory
View File
@@ -0,0 +1,71 @@
import pytest
from autogpt.config import Config
from autogpt.plugins import scan_plugins
PLUGINS_TEST_DIR = "tests/unit/data/test_plugins"
PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/"
@pytest.fixture
def mock_config_denylist_allowlist_check():
class MockConfig:
"""Mock config object for testing the denylist_allowlist_check function"""
plugins_denylist = ["BadPlugin"]
plugins_allowlist = ["GoodPlugin"]
authorise_key = "y"
exit_key = "n"
return MockConfig()
@pytest.fixture
def config_with_plugins():
"""Mock config object for testing the scan_plugins function"""
# Test that the function returns the correct number of plugins
cfg = Config()
cfg.plugins_dir = PLUGINS_TEST_DIR
cfg.plugins_openai = ["https://weathergpt.vercel.app/"]
return cfg
@pytest.fixture
def mock_config_openai_plugin():
"""Mock config object for testing the scan_plugins function"""
class MockConfig:
"""Mock config object for testing the scan_plugins function"""
plugins_dir = PLUGINS_TEST_DIR
plugins_openai = [PLUGIN_TEST_OPENAI]
plugins_denylist = ["AutoGPTPVicuna"]
plugins_allowlist = [PLUGIN_TEST_OPENAI]
return MockConfig()
def test_scan_plugins_openai(mock_config_openai_plugin):
# Test that the function returns the correct number of plugins
result = scan_plugins(mock_config_openai_plugin, debug=True)
assert len(result) == 1
@pytest.fixture
def mock_config_generic_plugin():
"""Mock config object for testing the scan_plugins function"""
# Test that the function returns the correct number of plugins
class MockConfig:
plugins_dir = PLUGINS_TEST_DIR
plugins_openai = []
plugins_denylist = []
plugins_allowlist = ["AutoGPTPVicuna"]
return MockConfig()
def test_scan_plugins_generic(mock_config_generic_plugin):
# Test that the function returns the correct number of plugins
result = scan_plugins(mock_config_generic_plugin, debug=True)
assert len(result) == 1
View File
@@ -1,120 +0,0 @@
import unittest
from uuid import uuid4
from weaviate import Client
from weaviate.util import get_valid_uuid
from autogpt.config import Config
from autogpt.llm import get_ada_embedding
from autogpt.memory.weaviate import WeaviateMemory
class TestWeaviateMemory(unittest.TestCase):
cfg = None
client = None
index = None
@classmethod
def setUpClass(cls):
"""Set up the test environment for the WeaviateMemory tests."""
# only create the connection to weaviate once
cls.cfg = Config()
if cls.cfg.use_weaviate_embedded:
from weaviate.embedded import EmbeddedOptions
cls.client = Client(
embedded_options=EmbeddedOptions(
hostname=cls.cfg.weaviate_host,
port=int(cls.cfg.weaviate_port),
persistence_data_path=cls.cfg.weaviate_embedded_path,
)
)
else:
cls.client = Client(
f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{self.cfg.weaviate_port}"
)
cls.index = WeaviateMemory.format_classname(cls.cfg.memory_index)
"""
In order to run these tests you will need a local instance of
Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose
for creating local instances using docker.
Alternatively in your .env file set the following environmental variables to run Weaviate embedded (see: https://weaviate.io/developers/weaviate/installation/embedded):
USE_WEAVIATE_EMBEDDED=True
WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
"""
def setUp(self):
"""Set up the test environment for the WeaviateMemory tests."""
try:
self.client.schema.delete_class(self.index)
except:
pass
self.memory = WeaviateMemory(self.cfg)
def test_add(self):
"""Test adding a text to the cache"""
doc = "You are a Titan name Thanos and you are looking for the Infinity Stones"
self.memory.add(doc)
result = self.client.query.get(self.index, ["raw_text"]).do()
actual = result["data"]["Get"][self.index]
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0]["raw_text"], doc)
def test_get(self):
"""Test getting a text from the cache"""
doc = "You are an Avenger and swore to defend the Galaxy from a menace called Thanos"
# add the document to the cache
with self.client.batch as batch:
batch.add_data_object(
uuid=get_valid_uuid(uuid4()),
data_object={"raw_text": doc},
class_name=self.index,
vector=get_ada_embedding(doc),
)
batch.flush()
actual = self.memory.get(doc)
self.assertEqual(len(actual), 1)
self.assertEqual(actual[0], doc)
def test_get_stats(self):
"""Test getting the stats of the cache"""
docs = [
"You are now about to count the number of docs in this index",
"And then you about to find out if you can count correctly",
]
[self.memory.add(doc) for doc in docs]
stats = self.memory.get_stats()
self.assertTrue(stats)
self.assertTrue("count" in stats)
self.assertEqual(stats["count"], 2)
def test_clear(self):
"""Test clearing the cache"""
docs = [
"Shame this is the last test for this class",
"Testing is fun when someone else is doing it",
]
[self.memory.add(doc) for doc in docs]
self.assertEqual(self.memory.get_stats()["count"], 2)
self.memory.clear()
self.assertEqual(self.memory.get_stats()["count"], 0)
if __name__ == "__main__":
unittest.main()
View File
@@ -1,72 +0,0 @@
# sourcery skip: snake-case-functions
"""Tests for the MilvusMemory class."""
import os
import sys
import unittest
try:
from autogpt.config import Config
from autogpt.memory.milvus import MilvusMemory
def mock_config() -> Config:
"""Mock the config object for testing purposes."""
# Return a mock config object with the required attributes
class MockConfig(Config):
debug_mode = False
continuous_mode = False
speak_mode = False
milvus_collection = "autogpt"
milvus_addr = "localhost:19530"
return MockConfig()
class TestMilvusMemory(unittest.TestCase):
"""Tests for the MilvusMemory class."""
def setUp(self) -> None:
"""Set up the test environment"""
self.cfg = mock_config()
self.memory = MilvusMemory(self.cfg)
def test_add(self) -> None:
"""Test adding a text to the cache"""
text = "Sample text"
self.memory.clear()
self.memory.add(text)
result = self.memory.get(text)
self.assertEqual([text], result)
def test_clear(self) -> None:
"""Test clearing the cache"""
self.memory.clear()
self.assertEqual(self.memory.collection.num_entities, 0)
def test_get(self) -> None:
"""Test getting a text from the cache"""
text = "Sample text"
self.memory.clear()
self.memory.add(text)
result = self.memory.get(text)
self.assertEqual(result, [text])
def test_get_relevant(self) -> None:
"""Test getting relevant texts from the cache"""
text1 = "Sample text 1"
text2 = "Sample text 2"
self.memory.clear()
self.memory.add(text1)
self.memory.add(text2)
result = self.memory.get_relevant(text1, 1)
self.assertEqual(result, [text1])
def test_get_stats(self) -> None:
"""Test getting the cache stats"""
text = "Sample text"
self.memory.clear()
self.memory.add(text)
stats = self.memory.get_stats()
self.assertEqual(15, len(stats))
except ImportError as err:
print(f"Skipping tests for MilvusMemory: {err}")
View File
@@ -3,17 +3,16 @@ from unittest.mock import MagicMock
import pytest import pytest
from autogpt.agent import Agent from autogpt.agent import Agent
from autogpt.config import Config from autogpt.config import AIConfig
@pytest.fixture @pytest.fixture
def agent(): def agent():
ai_name = "Test AI" ai_name = "Test AI"
memory = MagicMock() memory = MagicMock()
full_message_history = []
next_action_count = 0 next_action_count = 0
command_registry = MagicMock() command_registry = MagicMock()
config = Config() config = AIConfig()
system_prompt = "System prompt" system_prompt = "System prompt"
triggering_prompt = "Triggering prompt" triggering_prompt = "Triggering prompt"
workspace_directory = "workspace_directory" workspace_directory = "workspace_directory"
@@ -21,7 +20,6 @@ def agent():
agent = Agent( agent = Agent(
ai_name, ai_name,
memory, memory,
full_message_history,
next_action_count, next_action_count,
command_registry, command_registry,
config, config,
@@ -32,10 +30,10 @@ def agent():
return agent return agent
def test_agent_initialization(agent): def test_agent_initialization(agent: Agent):
assert agent.ai_name == "Test AI" assert agent.ai_name == "Test AI"
assert agent.memory == agent.memory assert agent.memory == agent.memory
assert agent.full_message_history == [] assert agent.history.messages == []
assert agent.next_action_count == 0 assert agent.next_action_count == 0
assert agent.command_registry == agent.command_registry assert agent.command_registry == agent.command_registry
assert agent.config == agent.config assert agent.config == agent.config
View File
@@ -1,15 +1,14 @@
import pytest import pytest
from autogpt.agent.agent_manager import AgentManager from autogpt.agent.agent_manager import AgentManager
from autogpt.llm import create_chat_completion from autogpt.llm.chat import create_chat_completion
@pytest.fixture @pytest.fixture
def agent_manager(): def agent_manager():
# Hack, real gross. Singletons are not good times. # Hack, real gross. Singletons are not good times.
if AgentManager in AgentManager._instances: yield AgentManager()
del AgentManager._instances[AgentManager] del AgentManager._instances[AgentManager]
return AgentManager()
@pytest.fixture @pytest.fixture
@@ -37,28 +36,28 @@ def mock_create_chat_completion(mocker):
return mock_create_chat_completion return mock_create_chat_completion
def test_create_agent(agent_manager, task, prompt, model): def test_create_agent(agent_manager: AgentManager, task, prompt, model):
key, agent_reply = agent_manager.create_agent(task, prompt, model) key, agent_reply = agent_manager.create_agent(task, prompt, model)
assert isinstance(key, int) assert isinstance(key, int)
assert isinstance(agent_reply, str) assert isinstance(agent_reply, str)
assert key in agent_manager.agents assert key in agent_manager.agents
def test_message_agent(agent_manager, task, prompt, model): def test_message_agent(agent_manager: AgentManager, task, prompt, model):
key, _ = agent_manager.create_agent(task, prompt, model) key, _ = agent_manager.create_agent(task, prompt, model)
user_message = "Please translate 'Good morning' to French." user_message = "Please translate 'Good morning' to French."
agent_reply = agent_manager.message_agent(key, user_message) agent_reply = agent_manager.message_agent(key, user_message)
assert isinstance(agent_reply, str) assert isinstance(agent_reply, str)
def test_list_agents(agent_manager, task, prompt, model): def test_list_agents(agent_manager: AgentManager, task, prompt, model):
key, _ = agent_manager.create_agent(task, prompt, model) key, _ = agent_manager.create_agent(task, prompt, model)
agents_list = agent_manager.list_agents() agents_list = agent_manager.list_agents()
assert isinstance(agents_list, list) assert isinstance(agents_list, list)
assert (key, task) in agents_list assert (key, task) in agents_list
def test_delete_agent(agent_manager, task, prompt, model): def test_delete_agent(agent_manager: AgentManager, task, prompt, model):
key, _ = agent_manager.create_agent(task, prompt, model) key, _ = agent_manager.create_agent(task, prompt, model)
success = agent_manager.delete_agent(key) success = agent_manager.delete_agent(key)
assert success assert success
View File
@@ -2,7 +2,7 @@ from unittest.mock import MagicMock, patch
import pytest import pytest
from autogpt.llm import COSTS, ApiManager from autogpt.llm.api_manager import COSTS, ApiManager
api_manager = ApiManager() api_manager = ApiManager()
View File
@@ -1,78 +0,0 @@
# Generated by CodiumAI
import time
from unittest.mock import patch
from autogpt.llm import create_chat_message, generate_context
def test_happy_path_role_content():
"""Test that the function returns a dictionary with the correct keys and values when valid strings are provided for role and content."""
result = create_chat_message("system", "Hello, world!")
assert result == {"role": "system", "content": "Hello, world!"}
def test_empty_role_content():
"""Test that the function returns a dictionary with the correct keys and values when empty strings are provided for role and content."""
result = create_chat_message("", "")
assert result == {"role": "", "content": ""}
def test_generate_context_empty_inputs(mocker):
"""Test the behavior of the generate_context function when all input parameters are empty."""
# Mock the time.strftime function to return a fixed value
mocker.patch("time.strftime", return_value="Sat Apr 15 00:00:00 2023")
# Arrange
prompt = ""
relevant_memory = ""
full_message_history = []
model = "gpt-3.5-turbo-0301"
# Act
result = generate_context(prompt, relevant_memory, full_message_history, model)
# Assert
expected_result = (
-1,
32,
2,
[
{"role": "system", "content": ""},
{
"role": "system",
"content": f"The current time and date is {time.strftime('%c')}",
},
],
)
assert result == expected_result
def test_generate_context_valid_inputs():
"""Test that the function successfully generates a current_context given valid inputs."""
# Given
prompt = "What is your favorite color?"
relevant_memory = "You once painted your room blue."
full_message_history = [
create_chat_message("user", "Hi there!"),
create_chat_message("assistant", "Hello! How can I assist you today?"),
create_chat_message("user", "Can you tell me a joke?"),
create_chat_message(
"assistant",
"Why did the tomato turn red? Because it saw the salad dressing!",
),
create_chat_message("user", "Haha, that's funny."),
]
model = "gpt-3.5-turbo-0301"
# When
result = generate_context(prompt, relevant_memory, full_message_history, model)
# Then
assert isinstance(result[0], int)
assert isinstance(result[1], int)
assert isinstance(result[2], int)
assert isinstance(result[3], list)
assert result[0] >= 0
assert result[2] >= 0
assert result[1] >= 0
assert len(result[3]) >= 2 # current_context should have at least 2 messages
assert result[1] <= 2048 # token limit for GPT-3.5-turbo-0301 is 2048 tokens
View File
@@ -13,6 +13,8 @@ from pytest_mock import MockerFixture
import autogpt.commands.file_operations as file_ops import autogpt.commands.file_operations as file_ops
from autogpt.config import Config from autogpt.config import Config
from autogpt.memory.vector.memory_item import MemoryItem
from autogpt.memory.vector.utils import Embedding
from autogpt.utils import readable_file_size from autogpt.utils import readable_file_size
from autogpt.workspace import Workspace from autogpt.workspace import Workspace
@@ -22,6 +24,23 @@ def file_content():
return "This is a test file.\n" return "This is a test file.\n"
@pytest.fixture()
def mock_MemoryItem_from_text(mocker: MockerFixture, mock_embedding: Embedding):
mocker.patch.object(
file_ops.MemoryItem,
"from_text",
new=lambda content, source_type, metadata: MemoryItem(
raw_content=content,
summary=f"Summary of content '{content}'",
chunk_summaries=[f"Summary of content '{content}'"],
chunks=[content],
e_summary=mock_embedding,
e_chunks=[mock_embedding],
metadata=metadata | {"source_type": source_type},
),
)
@pytest.fixture() @pytest.fixture()
def test_file_path(config, workspace: Workspace): def test_file_path(config, workspace: Workspace):
return workspace.get_path("test_file.txt") return workspace.get_path("test_file.txt")
@@ -188,7 +207,11 @@ def test_split_file(max_length, overlap, content, expected):
) )
def test_read_file(test_file_with_content_path: Path, file_content): def test_read_file(
mock_MemoryItem_from_text,
test_file_with_content_path: Path,
file_content,
):
content = file_ops.read_file(test_file_with_content_path) content = file_ops.read_file(test_file_with_content_path)
assert content == file_content assert content == file_content
View File
@@ -2,7 +2,7 @@ from datetime import datetime
from autogpt.agent.agent import Agent from autogpt.agent.agent import Agent
from autogpt.config import AIConfig from autogpt.config import AIConfig
from autogpt.llm import create_chat_completion from autogpt.llm.chat import create_chat_completion
from autogpt.log_cycle.log_cycle import LogCycleHandler from autogpt.log_cycle.log_cycle import LogCycleHandler
View File
@@ -1,11 +1,9 @@
from unittest.mock import patch from unittest.mock import patch
import pytest import pytest
from openai import InvalidRequestError
from openai.error import APIError, RateLimitError from openai.error import APIError, RateLimitError
from autogpt.llm import llm_utils from autogpt.llm import utils as llm_utils
from autogpt.llm.llm_utils import check_model
@pytest.fixture(params=[RateLimitError, APIError]) @pytest.fixture(params=[RateLimitError, APIError])
@@ -107,36 +105,6 @@ def test_retry_openapi_other_api_error(capsys):
assert output.out == "" assert output.out == ""
def test_chunked_tokens():
text = "Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model"
expected_output = [
(
13556,
12279,
2898,
374,
459,
22772,
1825,
31874,
3851,
67908,
279,
17357,
315,
279,
480,
2898,
12,
19,
4221,
1646,
)
]
output = list(llm_utils.chunked_tokens(text, "cl100k_base", 8191))
assert output == expected_output
def test_check_model(api_manager): def test_check_model(api_manager):
""" """
Test if check_model() returns original model when valid. Test if check_model() returns original model when valid.
@@ -145,7 +113,7 @@ def test_check_model(api_manager):
with patch("openai.Model.list") as mock_list_models: with patch("openai.Model.list") as mock_list_models:
# Test when correct model is returned # Test when correct model is returned
mock_list_models.return_value = {"data": [{"id": "gpt-4"}]} mock_list_models.return_value = {"data": [{"id": "gpt-4"}]}
result = check_model("gpt-4", "smart_llm_model") result = llm_utils.check_model("gpt-4", "smart_llm_model")
assert result == "gpt-4" assert result == "gpt-4"
# Reset api manager models # Reset api manager models
@@ -153,7 +121,7 @@ def test_check_model(api_manager):
# Test when incorrect model is returned # Test when incorrect model is returned
mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]} mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
result = check_model("gpt-4", "fast_llm_model") result = llm_utils.check_model("gpt-4", "fast_llm_model")
assert result == "gpt-3.5-turbo" assert result == "gpt-3.5-turbo"
# Reset api manager models # Reset api manager models
View File
@@ -1,16 +1,11 @@
import pytest import pytest
from autogpt.config import Config from autogpt.config import Config
from autogpt.plugins import ( from autogpt.plugins import denylist_allowlist_check, inspect_zip_for_modules
denylist_allowlist_check,
inspect_zip_for_modules,
scan_plugins,
)
PLUGINS_TEST_DIR = "tests/unit/data/test_plugins" PLUGINS_TEST_DIR = "tests/unit/data/test_plugins"
PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip" PLUGIN_TEST_ZIP_FILE = "Auto-GPT-Plugin-Test-master.zip"
PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__.py" PLUGIN_TEST_INIT_PY = "Auto-GPT-Plugin-Test-master/src/auto_gpt_vicuna/__init__.py"
PLUGIN_TEST_OPENAI = "https://weathergpt.vercel.app/"
def test_inspect_zip_for_modules(): def test_inspect_zip_for_modules():
@@ -77,54 +72,3 @@ def test_denylist_allowlist_check_user_input_invalid(
assert not denylist_allowlist_check( assert not denylist_allowlist_check(
"UnknownPlugin", mock_config_denylist_allowlist_check "UnknownPlugin", mock_config_denylist_allowlist_check
) )
@pytest.fixture
def config_with_plugins():
"""Mock config object for testing the scan_plugins function"""
# Test that the function returns the correct number of plugins
cfg = Config()
cfg.plugins_dir = PLUGINS_TEST_DIR
cfg.plugins_openai = ["https://weathergpt.vercel.app/"]
return cfg
@pytest.fixture
def mock_config_openai_plugin():
"""Mock config object for testing the scan_plugins function"""
class MockConfig:
"""Mock config object for testing the scan_plugins function"""
plugins_dir = PLUGINS_TEST_DIR
plugins_openai = [PLUGIN_TEST_OPENAI]
plugins_denylist = ["AutoGPTPVicuna"]
plugins_allowlist = [PLUGIN_TEST_OPENAI]
return MockConfig()
def test_scan_plugins_openai(mock_config_openai_plugin):
# Test that the function returns the correct number of plugins
result = scan_plugins(mock_config_openai_plugin, debug=True)
assert len(result) == 1
@pytest.fixture
def mock_config_generic_plugin():
"""Mock config object for testing the scan_plugins function"""
# Test that the function returns the correct number of plugins
class MockConfig:
plugins_dir = PLUGINS_TEST_DIR
plugins_openai = []
plugins_denylist = []
plugins_allowlist = ["AutoGPTPVicuna"]
return MockConfig()
def test_scan_plugins_generic(mock_config_generic_plugin):
# Test that the function returns the correct number of plugins
result = scan_plugins(mock_config_generic_plugin, debug=True)
assert len(result) == 1
View File
@@ -1,20 +1,13 @@
import pytest import pytest
from autogpt.llm import count_message_tokens, count_string_tokens from autogpt.llm.base import Message
from autogpt.llm.utils import count_message_tokens, count_string_tokens
def test_count_message_tokens(): def test_count_message_tokens():
messages = [ messages = [
{"role": "user", "content": "Hello"}, Message("user", "Hello"),
{"role": "assistant", "content": "Hi there!"}, Message("assistant", "Hi there!"),
]
assert count_message_tokens(messages) == 17
def test_count_message_tokens_with_name():
messages = [
{"role": "user", "content": "Hello", "name": "John"},
{"role": "assistant", "content": "Hi there!"},
] ]
assert count_message_tokens(messages) == 17 assert count_message_tokens(messages) == 17
@@ -25,19 +18,19 @@ def test_count_message_tokens_empty_input():
def test_count_message_tokens_invalid_model(): def test_count_message_tokens_invalid_model():
"""Invalid model should raise a KeyError""" """Invalid model should raise a NotImplementedError"""
messages = [ messages = [
{"role": "user", "content": "Hello"}, Message("user", "Hello"),
{"role": "assistant", "content": "Hi there!"}, Message("assistant", "Hi there!"),
] ]
with pytest.raises(KeyError): with pytest.raises(NotImplementedError):
count_message_tokens(messages, model="invalid_model") count_message_tokens(messages, model="invalid_model")
def test_count_message_tokens_gpt_4(): def test_count_message_tokens_gpt_4():
messages = [ messages = [
{"role": "user", "content": "Hello"}, Message("user", "Hello"),
{"role": "assistant", "content": "Hi there!"}, Message("assistant", "Hi there!"),
] ]
assert count_message_tokens(messages, model="gpt-4-0314") == 15 assert count_message_tokens(messages, model="gpt-4-0314") == 15
@@ -55,16 +48,6 @@ def test_count_string_tokens_empty_input():
assert count_string_tokens("", model_name="gpt-3.5-turbo-0301") == 0 assert count_string_tokens("", model_name="gpt-3.5-turbo-0301") == 0
def test_count_message_tokens_invalid_model():
"""Invalid model should raise a NotImplementedError"""
messages = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there!"},
]
with pytest.raises(NotImplementedError):
count_message_tokens(messages, model="invalid_model")
def test_count_string_tokens_gpt_4(): def test_count_string_tokens_gpt_4():
"""Test that the string tokens are counted correctly.""" """Test that the string tokens are counted correctly."""