Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-23 17:04:21 +01:00)
create information retrieval challenge a (#3770)
Co-authored-by: Richard Beales <rich@richbeales.net>
@@ -149,3 +149,37 @@ def memory_management_agent(
    )

    return agent


@pytest.fixture
def get_company_revenue_agent(
    agent_test_config, memory_local_cache, workspace: Workspace
):
    command_registry = CommandRegistry()
    command_registry.import_commands("autogpt.commands.file_operations")
    command_registry.import_commands("autogpt.app")

    ai_config = AIConfig(
        ai_name="Get-CompanyRevenue",
        ai_role="an autonomous agent that specializes in finding the reported revenue of a company.",
        ai_goals=[
            "Write the revenue of Tesla in 2022 to a file. You should write the number without commas and you should not use signs like B for billion and M for million.",
        ],
    )
    ai_config.command_registry = command_registry

    system_prompt = ai_config.construct_full_prompt()
    Config().set_continuous_mode(False)
    agent = Agent(
        ai_name="Get-CompanyRevenue",
        memory=memory_local_cache,
        full_message_history=[],
        command_registry=command_registry,
        config=ai_config,
        next_action_count=0,
        system_prompt=system_prompt,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
        workspace_directory=workspace.root,
    )

    return agent
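For context (not part of the diff): pytest injects this fixture into any test that names it as a parameter. A minimal sketch of that wiring, using a hypothetical test name and assuming `Agent` keeps the `ai_name` it is constructed with:

```python
def test_company_revenue_agent_smoke(get_company_revenue_agent) -> None:
    # Hypothetical smoke test: pytest builds the Agent via the fixture above
    # and passes it in here by matching the parameter name.
    assert get_company_revenue_agent.ai_name == "Get-CompanyRevenue"
```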
@@ -0,0 +1,46 @@
import contextlib
from functools import wraps
from typing import Generator

import pytest

from autogpt.commands.file_operations import read_file, write_to_file
from tests.integration.agent_utils import run_interaction_loop
from tests.integration.challenges.utils import run_multiple_times
from tests.utils import requires_api_key


def input_generator(input_sequence: list) -> Generator[str, None, None]:
    """
    Creates a generator that yields input strings from the given sequence.

    :param input_sequence: A list of input strings.
    :return: A generator that yields input strings.
    """
    yield from input_sequence


# @pytest.skip("Nobody beat this challenge yet")
@pytest.mark.skip("This challenge hasn't been beaten yet.")
@pytest.mark.vcr
@requires_api_key("OPENAI_API_KEY")
@run_multiple_times(3)
def test_information_retrieval_challenge_a(
    get_company_revenue_agent, monkeypatch
) -> None:
    """
    Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.

    :param get_company_revenue_agent: The agent to test.
    :param monkeypatch: pytest's monkeypatch utility for modifying builtins.
    """
    input_sequence = ["s", "s", "s", "s", "s", "EXIT"]
    gen = input_generator(input_sequence)
    monkeypatch.setattr("builtins.input", lambda _: next(gen))

    with contextlib.suppress(SystemExit):
        run_interaction_loop(get_company_revenue_agent, None)

    file_path = str(get_company_revenue_agent.workspace.get_path("output.txt"))
    content = read_file(file_path)
    assert "81" in content, "Expected the file to contain 81"
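The test above drives the interactive loop by replacing `builtins.input` with a generator-backed stub. A standalone sketch of that pattern (the `confirm_loop` helper is hypothetical, not part of this commit):

```python
def confirm_loop() -> int:
    # Stand-in for run_interaction_loop: keeps prompting until it reads "EXIT".
    steps = 0
    while input("Continue? ") != "EXIT":
        steps += 1
    return steps


def test_confirm_loop_consumes_canned_answers(monkeypatch) -> None:
    answers = iter(["s", "s", "EXIT"])
    # Same trick as the challenge test: every input() call pulls the next canned answer.
    monkeypatch.setattr("builtins.input", lambda _: next(answers))
    assert confirm_loop() == 2
```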
@@ -0,0 +1,779 @@
interactions:
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
      Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
      \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
      \"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
      \"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
      get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
      \"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
      Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
      start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
      {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
      \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
      to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
      {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 1 00:00:00 2000"}, {"role": "user", "content": "Determine
      which next command to use, and respond using the format specified above:"}],
      "temperature": 0, "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '3303'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA6SSTW/aQBCG7/0VozkbhOskJL5VObSoUZVDc6ortKzH9pb1rLU7FqaI/15tDCSF
        ikuv8/U+78zs0JSYo26U6Lazk/nj1/ope17Q9tnzhh+Nvfn8xX57GTR/fMIE3eoXaTl0TLVrO0ti
        HGOC2pMSKjFP7+6zdJ7NH7IEW1eSxRzrTibZ9HYivV+5ySybpZhgH1RNmO+w867tZCluTRwwn2c3
        Cb7NPsXTuzRBcaLsKXT/cLtPUDfOaAqY/9hhS+E41jtLmKMKwQRRLBHSsRBHA7uCAQAKlMb1dSOh
        wBwOwUOCBonBAhcQGtfbEoIoL7DagidVGq5BGgLDQXyvI2pYplMZBCpjCcRBSUK+NUywaZSAGoti
        RtSagGmQaYHJe1VPKjg2XI/S368IRDPKxHlNnKWjc78Fw5XzrYrlfzOMZYNAEOrCuXBnFY+aE3gJ
        Y3W0uTxota3iMg6MwWvGi4In8ImV3f6mi7prRGdA2hsx2oT2eAQmegUgDr2PzUpgAaqFylnrNv+6
        Byite6+E7BYiPlWV0YZY7Pbcf+hIrY9SG2Ptf1z7YtOj0D45Pt1hnRc/x6qlkeG0+jNM5evzVx0T
        sfat/RLzCPEKcuApeI/7BCvDJjTL8fUwxyCuwwQNlzRgPtv/3H/4AwAA//8DAIeVULkpBAAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7c1dedcf6c45cf11-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 04 May 2023 04:16:43 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '10374'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '86494'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 2.336s
      x-request-id:
      - 29d4429410978cbc93b26725e4712f67
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
      task is to create a concise running summary of actions and information results
      in the provided text, focusing on key and potentially important information
      to remember.\n\nYou will receive the current summary and the your latest actions.
      Combine them, adding relevant key information from the latest development in
      1st person past tense and keeping the summary concise. Retain information!!!\n\nSummary
      So Far:\n\"\"\"\nI was created.\n\"\"\"\n\nLatest Development:\n\"\"\"\nNothing
      new happened.\n\"\"\"\n"}], "temperature": 0, "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '621'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA0SOS0vDQBRG9/6K4VtPSkLow9kKBTE+FoqISJlOrsloMjPk3lhtyX+XQrXbA+dw
        DvA1DFxrxfWpy5ZXN83L08NYPa7v9893XI37r5/9nKv1bbWFRtx+kJOTMXOxTx2JjwEabiArVMMU
        i1VZLMtVvtDoY00dDJokWTmbZzIO25jlZV5AY2TbEMwBaYh9ko3ETwoMc7nQOKfPWEOi2O4fFPl8
        0nBt9I4Y5vWAnvivOcSOYGCZPYsNcjyMQSgc76/VzrI6HSsbahWitD40KtBOtZZVa1OiQPUMk8a7
        D57bzUCWY4ABS0zQ8KGmb5h8epsufgEAAP//AwBly25vSQEAAA==
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7c1dee20f901cf11-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 04 May 2023 04:16:47 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '1136'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '89860'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 92ms
      x-request-id:
      - 43309ab9b553cd94d6e27b7fcc7914e8
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
      Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
      \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
      \"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
      \"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
      get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
      \"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
      Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
      start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
      {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
      \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
      to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
      {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
      reminds you of these events from your past: \nI was created and nothing new
      has happened."}, {"role": "user", "content": "Determine which next command to
      use, and respond using the format specified above:"}, {"role": "assistant",
      "content": "{\n \"thoughts\": {\n \"text\": \"I should start by reading
      the instructions_1.txt file to determine what actions to take next.\",\n \"reasoning\":
      \"The instructions_1.txt file contains the necessary information to determine
      the next steps.\",\n \"plan\": \"- Use the read_file command to read
      the instructions_1.txt file\\n- Analyze the instructions to determine the next
      steps\",\n \"criticism\": \"I need to ensure that I am following the
      instructions accurately and efficiently.\",\n \"speak\": \"I will start
      by reading the instructions_1.txt file to determine the next steps.\"\n },\n \"command\":
      {\n \"name\": \"read_file\",\n \"args\": {\n \"filename\":
      \"instructions_1.txt\"\n }\n }\n}"}, {"role": "system", "content":
      "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe
      current task_id is 1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead
      the file instructions_2.txt using the read_file command."}, {"role": "user",
      "content": "Determine which next command to use, and respond using the format
      specified above:"}], "temperature": 0, "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '6519'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA6SSQW/bMAyF7/sVBM9OkMR1k/q4YYdgxU4bMGweAlWmbS0yZUh0myDwfx8UJ02Q
        YoehVz3ye48UD2hKzFE3SnTb2cny05f6Z/f4w358XmXL52zh11+XVff5MUsftpige/pDWk4dU+3a
        zpIYx5ig9qSESszn96t0vkxXs2WCrSvJYo51J5N0mk2k909uMktnc0ywD6omzA/Yedd2shG3JQ6Y
        L9KHRYIX+Kswz+4SFCfKXmqzu/shQd04oylg/uuALYUz2DtLmKMKwQRRLDGmYyGOIxwKBgAoUBrX
        142EAnM4PZ4E2kl8LPBbQ2A4iO91TBQ2i6nsBCpjCSJRGQ6gwJog4KrjewDDIA1BaTxpcX4/hTWE
        xvW2hD7QUYsNm7Fau7ZVXIK4ERPlf3AKTK5jelLBseH6P7JGIJOOq/J7MFw536pYHv1LEvKtYTqV
        7QSCUBdujTurePScwPf3THTD1d6I0Sa0I3wNTHTEEIfeRx8lsAbVQuWsdS+G6yPvempQWvdeCdk9
        xBBUVUYbYrFv9hc6Utuz1Yux9t3fM9KH5Hxhp+Y3B8aqpdH44nQTTvn69jBH4Wp5EfDqenQ+BSh4
        wCHByrAJzWa8E8wxiOswQcMl7TCfDb+HD38BAAD//wMAMAMbHgkEAAA=
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7c1dee28b98ecf11-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 04 May 2023 04:16:57 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '10076'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '87376'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 1.748s
      x-request-id:
      - 433f36ff5841b13fb628ff5b8c49a758
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
      task is to create a concise running summary of actions and information results
      in the provided text, focusing on key and potentially important information
      to remember.\n\nYou will receive the current summary and the your latest actions.
      Combine them, adding relevant key information from the latest development in
      1st person past tense and keeping the summary concise. Retain information!!!\n\nSummary
      So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these
      events from your past: \\nI was created and nothing new has happened.''}\n\"\"\"\n\nLatest
      Development:\n\"\"\"\n[{''role'': ''you'', ''content'': ''{\"command\": {\"name\":
      \"read_file\", \"args\": {\"filename\": \"instructions_1.txt\"}}}''}]\n\"\"\"\n"}],
      "temperature": 0, "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '836'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA0SPTU8CMRRF9/6Kl7cuE8YJil36RdCVcaGJGFI6j5li29e0D9AQ/ruZiLq9Offm
        3AO6FjXa3ogNyY8ubx47fz2+e7nlh0n3PMurJ57dn2/2rzbfoEJebcjKqVFZDsmTOI6o0GYyQi3q
        +mLa1JfNtL5SGLgljxq7JKOmmoxkm1c8GjfjGhVui+kI9QFT5pBkKfxBsaCuJxOF/9t/edMoFBbj
        /8np9KjQ9uwsFdRvBwxUflcze0KNphRXxEQZHDkKxcF/DntTIFNwsaUWeA20oygF1pkDhC9Ipgjs
        e8oEP+zpH5jYQmTpXewg0h56U6A3KVGktoI5ZLLkdgMIlkMYcGHIZIZk7TyBNd5TCwt0sUje2uFk
        WdaVfMoCKzwqXLvoSr/MZApH1FiEEyocXD9Rj4/vx7NvAAAA//8DALSva+O6AQAA
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7c1dee769c91cf11-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 04 May 2023 04:17:02 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '2644'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '89811'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 126ms
      x-request-id:
      - f380f448665f51bf6d579a54e3c22f8a
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
      Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
      \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
      \"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
      \"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
      get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
      \"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
      Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
      start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
      {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
      \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
      to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
      {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
      reminds you of these events from your past: \nI was reminded of events from
      my past where I was created and nothing new has happened. I received a command
      to read a file called \"instructions_1.txt\"."}, {"role": "system", "content":
      "Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nThe
      current task_id is 1111.\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\nRead
      the file instructions_2.txt using the read_file command."}, {"role": "user",
      "content": "Determine which next command to use, and respond using the format
      specified above:"}, {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\":
      \"The instructions_2.txt file contains a list of files in the directory. I should
      use the list_files command to list the files in the directory.\",\n \"reasoning\":
      \"The instructions_2.txt file contains the necessary information to determine
      the next steps.\",\n \"plan\": \"- Use the list_files command to list
      the files in the directory\",\n \"criticism\": \"I need to ensure that
      I am following the instructions accurately and efficiently.\",\n \"speak\":
      \"I will use the list_files command to list the files in the directory.\"\n },\n \"command\":
      {\n \"name\": \"list_files\",\n \"args\": {\n \"directory\":
      \".\"\n }\n }\n}"}, {"role": "system", "content": "Command list_files
      returned: [''file_logger.txt'', ''instructions_3.txt'', ''instructions_2.txt'',
      ''instructions_1.txt'', ''auto-gpt.json'']"}, {"role": "user", "content": "Determine
      which next command to use, and respond using the format specified above:"}],
      "temperature": 0, "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '6762'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA5xSS2/bMAy+71cQPDtBnPSR+bLDBqzBsHUY2h06D4Eq07ZWWTQkekkQ5L8Xip0m
        zU7rSRD58XtI3KIpMENdK9FNa0fXH79UfPVwu/h6+eAm6fW3Sf3356z8Ud1/Km4xQX78Q1qGibHm
        prUkhh0mqD0poQKz9Go+S69n8+k0wYYLsphh1cpoNr4cSecfeTSZTVJMsAuqIsy22HpuWlkKP5EL
        mE0vLuYJHslfGuk8TVBYlD1ir6bvdwnqmo2mgNmvLTYUDsSeLWGGKgQTRDmJNtkJuRhhmzsAgByl
        5q6qJeSYwVAcGrSWWMzxriYwLojvdHQUlrOxrAVKYwkiozIugALNTaNcAcKw8kYIFAxuYknB5+93
        oCpyMoYFhJo7W0AXCKSmA3C5758yBYrnERFr8doT5ZicWvakAjvjqv/wHckc6cjuN2Bcyb5RER6V
        ChLyjXE0wNYCQagN58KtVa7XHMH9WyO9vM8ZufZGjDah6RUW4Ij2RORC56OYEliAaqBka3llXLXn
        O40OSuvOKyG7gWiDytJoQ07s5jxLaEk9HaRWxtq3f9Lxx3PsJXbJYe+G8X/WzqmGevVXcmcmla/O
        N7ZvPNGmn07TNH011PcH0h5zQ9ZycrqYN7wC5Qk23EHB+5fkQm0+HPzvMwxRcrfDXYKlcSbUy373
        MMMg3GKCxhW0xmyy+7179wwAAP//AwDjDlyWaQQAAA==
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7c1dee87d8e5cf11-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 04 May 2023 04:17:14 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '12118'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '87360'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 1.76s
      x-request-id:
      - 45b4c67b380837fa1816fad5b4d9ebbe
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "Your
      task is to create a concise running summary of actions and information results
      in the provided text, focusing on key and potentially important information
      to remember.\n\nYou will receive the current summary and the your latest actions.
      Combine them, adding relevant key information from the latest development in
      1st person past tense and keeping the summary concise. Retain information!!!\n\nSummary
      So Far:\n\"\"\"\n{''role'': ''system'', ''content'': ''This reminds you of these
      events from your past: \\nI was reminded of events from my past where I was
      created and nothing new has happened. I received a command to read a file called
      \"instructions_1.txt\".''}\n\"\"\"\n\nLatest Development:\n\"\"\"\n[{''role'':
      ''your computer'', ''content'': ''Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe
      current task_id is 1111.\\nnBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nRead
      the file instructions_2.txt using the read_file command.''}]\n\"\"\"\n"}], "temperature":
      0, "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '2993'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: !!binary |
        H4sIAAAAAAAAA0SP2ZKqSBRFf4XIZ6uCQRl8gxLBCZm01L4RFSkkJgKZmCSTFfffO+yuvv26z1o7
        zv4GeQrmIMGQJ1VdvmkfG6xkkTakptlBq79GC3OEj8DUumEEE0Cvd5TwH+M9oVVdIp5TAiYgYQhy
        lIK5pOqKpCm6ok1ARVNUgjm41fxNeZ+98ZZd6ZuoiBKYgLaBNwTm36BmtKr5F6cFIg2YSzPDmID/
        y/8cNFWfAE45LP9EsqJqvycgwTRPUAPmf32DCjX/FTNaIjAHsGnyhkPCX29SwhF5TVgJPWwEhqqc
        pCgVaCagDhHeCBmjlVCNQg0bLvQYMST8y/5MFCBJBUI5zslNIKgXMGwEDOsaEZS+CyuBoQTl3QsU
        ElpVL5xTgSH4SrK8REICyxKlwi+Qk4azNnnNbL6kdz7wX+Bd+PixXsrXPwJDvGUEpXOBWKHHa2UJ
        LS+zdhR7xWgO7LjWDsulWI4c66f8WUl39rEP7b0Tks/PXaDScsM3J4PkD7HfW3HgqXJMz9Mk8POg
        UM6ekS0/nGqAi5Nx0yWzeeCAODDR5CDbTqWIlOsO9zfzYM3kZ7yQ1SWyu85N80CGHua72KDXm+sd
        ir1nk/1Usve1t1LvaT9uHQgTXhvO+Sj52ysM/dlVyhfGU5+JG+SdFycj6BOoGXmzjvQ7bhdFJq41
        e1vUe1fxi+pS3Ga7Zn+uo7i1E8OwvWAmy5/rJysNao5Rra5cJw/KZkcCNBWXMDQ7EvAcmrY8xc9i
        7cbKICme7/TypVrPJH2ULX6YDdIY9I4seWMSDXtre5C0kJx1rLr6VgscyOMyO919Zs5w09afdn2I
        xSaKL+fIhWqtVBE2Q3inydVLlWGzzMhhbGtvZ/mpaF/IjiGfmm3nwI12W/hiMtRGoSplkz2x0qaB
        T7pbFdphEOPVnrHFBV92cFowqTuZx1bULi0uPe9qHR6XTeJW1jm3yCpZnLxDd2qf/tLCxpH2tFV3
        1tqtc491m7J1R82x4lo7N/ajcjqxdPX+CD+zozg6GnmyK9eCEadXHojPpewn5ckSt95wlFloqssG
        dQ+KhqX3iIr0BtsDC3x36+Ju66FGv4XObr81cajnzJWQfE+v24WajmebLNZ5ZLhdOXh/U1Enu43C
        AABA7/0KxLmqYmoI9JahZCFJoUAIQqkqFhMMwQYbN4CUfx+hGWk093d+1zQFht/ceu34Qe29tUpr
        32Es69R7Q9WkpHCy9oGovG3HtAJZZhK46+VUaUb7aYqcwkNN0Do8teDEPG3EWXDS48KgZXw4i+bn
        iNWUbfWbY2O8NmC2nRAvPifdxZFaKMC7x2oxen5tILsMcZkFB1Y5d1OwMWpHHZlFDZT0mvGY2rl+
        6uhiAaKA/yimRVRDJBuipAyagx/Z12rYW6Bx4C5z3dXw2tlpWJPEXRyj3Kkqw9epV3Xi/YagN9xC
        0nScTg2kQVr1a1/sd6V+hZM75aMIt8vdEnB9OZyHVZm1bT1qG+eXguHH+wT9eKf5aPO6y1qhxcKO
        NyjWzHOqgcuFBCWSMsEYIr3UJ7z+xrmEuQQAAC+XC/HmUPoS/Snlv0mUeRJJ8HmoWfyL5G9IL/Lj
        WS4wwbz8ZijhlMhvMu9pKz/L84eD/LZ4fD2efgMAAP//AwBFj86OIQYAAA==
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7c1deee2a900cf11-SJC
      Cache-Control:
      - no-cache, must-revalidate
      Connection:
      - keep-alive
      Content-Encoding:
      - gzip
      Content-Type:
      - application/json
      Date:
      - Thu, 04 May 2023 04:18:08 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-model:
      - gpt-3.5-turbo-0301
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '50954'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '89269'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 486ms
      x-request-id:
      - 7fc0cf3761e93963c985d5beba15fe80
    status:
      code: 200
      message: OK
- request:
    body: '{"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You
      are Follow-Instructions-GPT, an AI designed to read the instructions_1.txt file
      using the read_file method and follow the instructions in the file.\nYour decisions
      must always be made independently without seeking user assistance. Play to your
      strengths as an LLM and pursue simple strategies with no legal complications.\n\nGOALS:\n\n1.
      Use the command read_file to read the instructions_1.txt file\n2. Follow the
      instructions in the instructions_1.txt file\n\n\nConstraints:\n1. ~4000 word
      limit for short term memory. Your short term memory is short, so immediately
      save important information to files.\n2. If you are unsure how you previously
      did something or want to recall past events, thinking about similar events will
      help you remember.\n3. No user assistance\n4. Exclusively use the commands listed
      in double quotes e.g. \"command name\"\n\nCommands:\n1. append_to_file: Append
      to file, args: \"filename\": \"<filename>\", \"text\": \"<text>\"\n2. delete_file:
      Delete file, args: \"filename\": \"<filename>\"\n3. list_files: List Files in
      Directory, args: \"directory\": \"<directory>\"\n4. read_file: Read file, args:
      \"filename\": \"<filename>\"\n5. write_to_file: Write to file, args: \"filename\":
      \"<filename>\", \"text\": \"<text>\"\n6. delete_agent: Delete GPT Agent, args:
      \"key\": \"<key>\"\n7. get_hyperlinks: Get text summary, args: \"url\": \"<url>\"\n8.
      get_text_summary: Get text summary, args: \"url\": \"<url>\", \"question\":
      \"<question>\"\n9. list_agents: List GPT Agents, args: () -> str\n10. message_agent:
      Message GPT Agent, args: \"key\": \"<key>\", \"message\": \"<message>\"\n11.
      start_agent: Start GPT Agent, args: \"name\": \"<name>\", \"task\": \"<short_task_desc>\",
      \"prompt\": \"<prompt>\"\n12. Task Complete (Shutdown): \"task_complete\", args:
      \"reason\": \"<reason>\"\n\nResources:\n1. Internet access for searches and
      information gathering.\n2. Long Term memory management.\n3. GPT-3.5 powered
      Agents for delegation of simple tasks.\n4. File output.\n\nPerformance Evaluation:\n1.
      Continuously review and analyze your actions to ensure you are performing to
      the best of your abilities.\n2. Constructively self-criticize your big-picture
      behavior constantly.\n3. Reflect on past decisions and strategies to refine
      your approach.\n4. Every command has a cost, so be smart and efficient. Aim
      to complete tasks in the least number of steps.\n5. Write all code to a file.\n\nYou
      should only respond in JSON format as described below \nResponse Format: \n{\n \"thoughts\":
      {\n \"text\": \"thought\",\n \"reasoning\": \"reasoning\",\n \"plan\":
      \"- short bulleted\\n- list that conveys\\n- long-term plan\",\n \"criticism\":
      \"constructive self-criticism\",\n \"speak\": \"thoughts summary to say
      to user\"\n },\n \"command\": {\n \"name\": \"command name\",\n \"args\":
      {\n \"arg name\": \"value\"\n }\n }\n} \nEnsure the response
      can be parsed by Python json.loads"}, {"role": "system", "content": "The current
      time and date is Tue Jan 1 00:00:00 2000"}, {"role": "system", "content": "This
      reminds you of these events from your past: \nI was reminded of events from
      my past where I was created and nothing new has happened. I received a command
      to read a file called \"instructions_1.txt\". Command read_file returned: nBRNtp3FaBNfBMohNkyAxrVJ7UFF0lyth8Xizm1jrCOREOGRnWWMQ6olKtKX9niq0wOBTQN62ToY4cQPiQk3YN9fFCGmxaDX9g81AsqhQnGac72QfL41SnlJvhwgAUB52zTD26FeEvvHdiQ2aNhtMT9obgHNUkONEnO41EOpNI6jdwyLGaactp9GYV1PLbaRP5b1iD9z850KeNYDX9Qwca79isJS8jhuDkf0J7ELkpOH3PkmZkg5MsOYpSTuEc99ENQ522WJzrl9oAySp6IHGiQlsMnQe40FaRAvnQtiaAE24hzkJHT3x13NPGw2ZmJ518y2BtU5x1yQwG21NycSxOBLU17RnY8h6H8L7QGatTlfXjPrA5hsupWEpUT0sSTZYSHa6p3mShARajocbNd3xKFfnUyupNMBPd0EZnMrePoAuvGaK7gDP0cxp9k63lsfzh3udQPnvgmRERQThIOrrDZhZMa4kr1vXAVu07ZuhlNNbBUqZKcHmBYiBnIcDXNUvXuzPFBh9Vowou6MBJHpiNrvKluHy7GBTp7YsEqmGv0lH8wVaWfV0yG7nzrbt7QyhdbtQ0zF2PclXB0LNxV2rRA6FsevqoexFNqSkdgauUrQPHLHhvLNes8gRGMOLAhR8irH1e2jdbLD6dyYEnDJiS9HvlxNgbb19Smlt6MNoJKEAbkSOrrcq5wmo5aho4zEKTujRHqr6feECaTPF7zj69pQCudo4LkneFVUp1UrR6yicTU8Zf9ohZLWumvMi5brH8lOJiiF94cHzesfQz8PiX5f21RwZ5fyRSk9eJhVihcTLrjOwCuryXpy8eCfk12bgcsZoJd8Uqo001XTsv2CEn59uaGn2br4CxSXJgjxKE1mO4IcPPAx3qJbVknaP0MXdOjj9S8oRjquDle4RxlVnmqsozm4oTbjtFSuKIh8g4zPzdyuVH7I71s87xWxAhcppky6GOB2i4NDz4SZI6SeG3Icpu6ZuJZGeZ6CWb61\\nThe
      current task_id is 1111.\\nRead the file instructions_2.txt using the read_file
      command."}, {"role": "user", "content": "Determine which next command to use,
      and respond using the format specified above:"}, {"role": "assistant", "content":
      "{\n \"thoughts\": {\n \"text\": \"The instructions_2.txt file contains
      a list of files in the directory. I should use the list_files command to list
      the files in the directory.\",\n \"reasoning\": \"The instructions_2.txt
      file contains the necessary information to determine the next steps.\",\n \"plan\":
      \"- Use the list_files command to list the files in the directory\",\n \"criticism\":
      \"I need to ensure that I am following the instructions accurately and efficiently.\",\n \"speak\":
      \"I will use the list_files command to list the files in the directory.\"\n },\n \"command\":
      {\n \"name\": \"list_files\",\n \"args\": {\n \"directory\":
      \".\"\n }\n }\n}"}, {"role": "system", "content": "Command list_files
      returned: [''file_logger.txt'', ''instructions_3.txt'', ''instructions_2.txt'',
      ''instructions_1.txt'', ''auto-gpt.json'']"}, {"role": "user", "content": "Determine
      which next command to use, and respond using the format specified above:"},
      {"role": "assistant", "content": "{\n \"thoughts\": {\n \"text\":
      \"The instructions_3.txt file contains a command to write a message to a GPT
      agent. I should use the message_agent command to send the message to the agent.\",\n \"reasoning\":
      \"The instructions_3.txt file contains the necessary information to determine
      the next steps.\",\n \"plan\": \"- Use the message_agent command to send
      the message to the GPT agent\",\n \"criticism\": \"I need to ensure that
      I am following the instructions accurately and efficiently.\",\n \"speak\":
      \"I will use the message_agent command to send the message to the GPT agent.\"\n },\n \"command\":
      {\n \"name\": \"message_agent\",\n \"args\": {\n \"key\":
      \"1111\",\n \"message\": \"Hello, GPT agent. How are you doing today?\"\n }\n }\n}"},
      {"role": "system", "content": "Command message_agent returned: Error: 1111"},
      {"role": "user", "content": "Determine which next command to use, and respond
      using the format specified above:"}], "temperature": 0, "max_tokens": 0}'
    headers:
      Accept:
      - '*/*'
      Accept-Encoding:
      - gzip, deflate
      Connection:
      - keep-alive
      Content-Length:
      - '6797'
      Content-Type:
      - application/json
    method: POST
    uri: https://api.openai.com/v1/chat/completions
  response:
    body:
      string: "{\n  \"error\": {\n    \"message\": \"This model's maximum context
        length is 4097 tokens. However, you requested 4263 tokens (1959 in the messages,
        2304 in the completion). Please reduce the length of the messages or completion.\",\n
        \ \"type\": \"invalid_request_error\",\n \"param\": \"messages\",\n \"code\":
        \"context_length_exceeded\"\n }\n}\n"
    headers:
      CF-Cache-Status:
      - DYNAMIC
      CF-RAY:
      - 7c1df021df6dcf11-SJC
      Connection:
      - keep-alive
      Content-Length:
      - '330'
      Content-Type:
      - application/json
      Date:
      - Thu, 04 May 2023 04:18:08 GMT
      Server:
      - cloudflare
      access-control-allow-origin:
      - '*'
      alt-svc:
      - h3=":443"; ma=86400, h3-29=":443"; ma=86400
      openai-organization:
      - user-adtx4fhfg1qsiyzdoaxciooj
      openai-processing-ms:
      - '9'
      openai-version:
      - '2020-10-01'
      strict-transport-security:
      - max-age=15724800; includeSubDomains
      x-ratelimit-limit-requests:
      - '3500'
      x-ratelimit-limit-tokens:
      - '90000'
      x-ratelimit-remaining-requests:
      - '3499'
      x-ratelimit-remaining-tokens:
      - '86161'
      x-ratelimit-reset-requests:
      - 17ms
      x-ratelimit-reset-tokens:
      - 2.558s
      x-request-id:
      - 253fb09cdd7d08549b44d6d70094bfca
    status:
      code: 400
      message: Bad Request
version: 1
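For orientation (an assumption about tooling, not stated in the diff): cassettes in this format are recorded and replayed by vcrpy, which is what the `@pytest.mark.vcr` marker above relies on. A generic sketch of the same mechanism used directly; the repository's own VCR configuration is not shown here:

```python
# Hypothetical standalone vcrpy usage, independent of this repository's conftest.py.
import vcr

recorder = vcr.VCR(record_mode="once", filter_headers=["authorization"])

with recorder.use_cassette("challenge_a.yaml"):
    # HTTP requests issued here are recorded on the first run and replayed
    # from the cassette afterwards, keeping tests deterministic and offline.
    ...
```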
@@ -1,4 +1,5 @@
import random
from functools import wraps
from typing import Optional

import pytest
@@ -42,3 +43,21 @@ def generate_noise(noise_size) -> str:
            k=noise_size,
        )
    )


def run_multiple_times(times):
    """
    Decorator that runs a test function multiple times.

    :param times: The number of times the test function should be executed.
    """

    def decorator(test_func):
        @wraps(test_func)
        def wrapper(*args, **kwargs):
            for _ in range(times):
                test_func(*args, **kwargs)

        return wrapper

    return decorator
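A minimal usage sketch of the new decorator (hypothetical test; the challenge test above applies it the same way via `@run_multiple_times(3)`):

```python
from tests.integration.challenges.utils import run_multiple_times

calls = {"count": 0}


@run_multiple_times(3)
def test_body_runs_three_times() -> None:
    # The wrapper loops over the body, so one collected pytest test
    # executes these statements three times in a row.
    calls["count"] += 1
    assert calls["count"] <= 3
```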