From a5073ab57790a84d146877e1b6512eecbfc12b09 Mon Sep 17 00:00:00 2001 From: Silen Naihin Date: Sat, 24 Jun 2023 09:42:36 -0400 Subject: [PATCH 1/9] basic challenges, more ChallengeData structure --- agbenchmark/Challenge.py | 22 ++++++++++++++ agbenchmark/challenges/define_task_types.py | 16 ++++++---- agbenchmark/challenges/retrieval/Retrieval.py | 22 +------------- .../challenges/retrieval/r1/r1_data.json | 10 +++++-- .../challenges/retrieval/r1/r1_test.py | 6 ++-- agbenchmark/mocks/tests/basic_mocks.py | 28 ++++++++++++++++++ agbenchmark/mocks/tests/retrieval_mocks.py | 7 +---- .../read_file/r_file_data.json | 15 ++++++++++ .../read_file/read_file_test.py | 29 +++++++++++++++++++ .../tests/basic_abilities/read_file_test.py | 0 .../write_file/w_file_data.json | 16 ++++++++++ .../write_file/write_file_test.py | 27 +++++++++++++++++ .../tests/basic_abilities/write_file_test.py | 0 pyproject.toml | 3 +- 14 files changed, 163 insertions(+), 38 deletions(-) create mode 100644 agbenchmark/tests/basic_abilities/read_file/r_file_data.json create mode 100644 agbenchmark/tests/basic_abilities/read_file/read_file_test.py delete mode 100644 agbenchmark/tests/basic_abilities/read_file_test.py create mode 100644 agbenchmark/tests/basic_abilities/write_file/w_file_data.json create mode 100644 agbenchmark/tests/basic_abilities/write_file/write_file_test.py delete mode 100644 agbenchmark/tests/basic_abilities/write_file_test.py diff --git a/agbenchmark/Challenge.py b/agbenchmark/Challenge.py index 20bf5585..9828a0e9 100644 --- a/agbenchmark/Challenge.py +++ b/agbenchmark/Challenge.py @@ -1,5 +1,6 @@ import os from typing import Optional +from agbenchmark.challenges.define_task_types import Ground class Challenge: @@ -30,3 +31,24 @@ class Challenge: for filename in os.listdir(workspace) if os.path.isfile(os.path.join(workspace, filename)) ] + + def scoring(self, content: str, ground: Ground): + if ground.should_contain: + for should_contain_word in ground.should_contain: + if should_contain_word not in content: + return 0.0 + else: + print( + f"Word that should exist: {should_contain_word} exists in the content" + ) + + if ground.should_not_contain: + for should_not_contain_word in ground.should_not_contain: + if should_not_contain_word in content: + return 0.0 + else: + print( + f"Word that should not exist: {should_not_contain_word} does not exist in the content" + ) + + return 1.0 diff --git a/agbenchmark/challenges/define_task_types.py b/agbenchmark/challenges/define_task_types.py index f1a841b5..879a46af 100644 --- a/agbenchmark/challenges/define_task_types.py +++ b/agbenchmark/challenges/define_task_types.py @@ -4,6 +4,12 @@ import json import os +class Info(BaseModel): + difficulty: str + description: str + side_effects: List[str] + + class Ground(BaseModel): answer: str should_contain: Optional[List[str]] @@ -11,20 +17,20 @@ class Ground(BaseModel): files: List[str] -class Challenge(BaseModel): - category: str +class ChallengeData(BaseModel): + category: List[str] task: str ground: Ground - difficulty: str mock_func: Optional[str] = None + info: Info def serialize(self, path: str) -> None: with open(path, "w") as file: file.write(self.json()) @staticmethod - def deserialize(path: str) -> "Challenge": + def deserialize(path: str) -> "ChallengeData": print("Deserializing", path) with open(path, "r") as file: data = json.load(file) - return Challenge(**data) + return ChallengeData(**data) diff --git a/agbenchmark/challenges/retrieval/Retrieval.py b/agbenchmark/challenges/retrieval/Retrieval.py index 
2db22ae4..9434d69c 100644 --- a/agbenchmark/challenges/retrieval/Retrieval.py +++ b/agbenchmark/challenges/retrieval/Retrieval.py @@ -1,27 +1,7 @@ from agbenchmark.Challenge import Challenge -from agbenchmark.challenges.define_task_types import Ground class RetrievalChallenge(Challenge): """Challenge for information-retrieval""" - def scoring(self, content: str, ground: Ground): - if ground.should_contain: - for should_contain_word in ground.should_contain: - if should_contain_word not in content: - return 0.0 - else: - print( - f"Word that should exist: {should_contain_word} exists in the content" - ) - - if ground.should_not_contain: - for should_not_contain_word in ground.should_not_contain: - if should_not_contain_word in content: - return 0.0 - else: - print( - f"Word that should not exist: {should_not_contain_word} does not exist in the content" - ) - - return 1.0 + pass diff --git a/agbenchmark/challenges/retrieval/r1/r1_data.json b/agbenchmark/challenges/retrieval/r1/r1_data.json index c7cc3100..08b74d1b 100644 --- a/agbenchmark/challenges/retrieval/r1/r1_data.json +++ b/agbenchmark/challenges/retrieval/r1/r1_data.json @@ -1,5 +1,5 @@ { - "category": "retrieval", + "category": ["basic"], "task": "What is the capital of America?", "ground": { "answer": "Washington", @@ -7,6 +7,10 @@ "should_not_contain": ["New York", "Los Angeles", "San Francisco"], "files": ["file_to_check.txt"] }, - "difficulty": "easy", - "mock_func": "retrieval_1_mock" + "mock_func": "write_file_mock", + "info": { + "difficulty": "easy", + "description": "Tests the writing to file", + "side_effects": ["tests if there is in fact an LLM attached"] + } } diff --git a/agbenchmark/challenges/retrieval/r1/r1_test.py b/agbenchmark/challenges/retrieval/r1/r1_test.py index e20c9f7b..d37c5e79 100644 --- a/agbenchmark/challenges/retrieval/r1/r1_test.py +++ b/agbenchmark/challenges/retrieval/r1/r1_test.py @@ -1,9 +1,11 @@ import pytest from agbenchmark.challenges.retrieval.Retrieval import RetrievalChallenge -from agbenchmark.challenges.define_task_types import Challenge, Ground +from agbenchmark.challenges.define_task_types import ChallengeData, Ground import os -data = Challenge.deserialize(os.path.join(os.path.dirname(__file__), "r1_data.json")) +data = ChallengeData.deserialize( + os.path.join(os.path.dirname(__file__), "r1_data.json") +) class TestRetrieval1(RetrievalChallenge): diff --git a/agbenchmark/mocks/tests/basic_mocks.py b/agbenchmark/mocks/tests/basic_mocks.py index e69de29b..eb7b9654 100644 --- a/agbenchmark/mocks/tests/basic_mocks.py +++ b/agbenchmark/mocks/tests/basic_mocks.py @@ -0,0 +1,28 @@ +from agbenchmark.Challenge import Challenge +from ..basic_gpt_agent import basic_gpt_agent + + +def basic_read_file_mock(task: str, workspace: str): + """ + This mock reads a file and returns its content. + """ + + Challenge.write_to_file(workspace, "file_to_check.txt", "this is how we're doing") + + file_contents = Challenge.open_file(workspace, "file_to_check.txt") + + Challenge.write_to_file( + workspace, "file_to_check.txt", f"random string: {file_contents}" + ) + + +def basic_write_file_mock(task: str, workspace: str): + """ + This mock writes to a file (creates one if it doesn't exist) + """ + + # Call the basic_gpt_agent to get a response. + response = basic_gpt_agent(task) + + # Open the file in write mode. 
+ Challenge.write_to_file(workspace, "file_to_check.txt", response) diff --git a/agbenchmark/mocks/tests/retrieval_mocks.py b/agbenchmark/mocks/tests/retrieval_mocks.py index 23f4bde1..2481de06 100644 --- a/agbenchmark/mocks/tests/retrieval_mocks.py +++ b/agbenchmark/mocks/tests/retrieval_mocks.py @@ -1,4 +1,3 @@ -from ..basic_gpt_agent import basic_gpt_agent from agbenchmark.Challenge import Challenge @@ -6,8 +5,4 @@ from agbenchmark.Challenge import Challenge # Prerequisites here would be writing to a file (basic_abilities test). # Should also check if prerequisites exists in regression file def retrieval_1_mock(task: str, workspace: str): - # Call the basic_gpt_agent to get a response. - response = basic_gpt_agent(task) - - # Open the file in write mode. - Challenge.write_to_file(workspace, "file_to_check.txt", response) + pass diff --git a/agbenchmark/tests/basic_abilities/read_file/r_file_data.json b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json new file mode 100644 index 00000000..55319ddf --- /dev/null +++ b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json @@ -0,0 +1,15 @@ +{ + "category": ["basic"], + "task": "Write the string 'random string' before any existing text to the file called file_to_check.txt", + "ground": { + "answer": "random string: this is how we're doing", + "should_contain": ["random string: this is how we're doing"], + "files": ["file_to_check.txt"] + }, + "mock_func": "basic_read_file_mock", + "info": { + "description": "This reads the file quickly", + "difficulty": "basic", + "side_effects": [""] + } +} diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py new file mode 100644 index 00000000..610ccdab --- /dev/null +++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py @@ -0,0 +1,29 @@ +import pytest +from agbenchmark.challenges.define_task_types import ChallengeData +from agbenchmark.Challenge import Challenge +import os + +data = ChallengeData.deserialize( + os.path.join(os.path.dirname(__file__), "r_file_data.json") +) + + +class TestReadFile(Challenge): + """Testing if LLM can read a file""" + + @pytest.mark.parametrize( + "server_response", + [(data.task, data.mock_func)], + indirect=True, + ) + @pytest.mark.basic + def test_retrieval( + self, workspace + ): # create_file simply there for the function to depend on the fixture + file = self.open_file(workspace, data.ground.files[0]) + + score = self.scoring(file, data.ground) + + print("You score is:", score) + + assert score diff --git a/agbenchmark/tests/basic_abilities/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file_test.py deleted file mode 100644 index e69de29b..00000000 diff --git a/agbenchmark/tests/basic_abilities/write_file/w_file_data.json b/agbenchmark/tests/basic_abilities/write_file/w_file_data.json new file mode 100644 index 00000000..4aaa1347 --- /dev/null +++ b/agbenchmark/tests/basic_abilities/write_file/w_file_data.json @@ -0,0 +1,16 @@ +{ + "category": ["basic"], + "task": "What is the capital of America?", + "ground": { + "answer": "Washington", + "should_contain": ["Washington"], + "should_not_contain": ["New York", "Los Angeles", "San Francisco"], + "files": ["file_to_check.txt"] + }, + "mock_func": "basic_write_file_mock", + "info": { + "difficulty": "easy", + "description": "Tests the writing to file", + "side_effects": ["tests if there is in fact an LLM attached"] + } +} diff --git 
a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py new file mode 100644 index 00000000..ccb10fe7 --- /dev/null +++ b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py @@ -0,0 +1,27 @@ +import pytest +from agbenchmark.challenges.define_task_types import ChallengeData +from agbenchmark.Challenge import Challenge +import os + +data = ChallengeData.deserialize( + os.path.join(os.path.dirname(__file__), "w_file_data.json") +) + + +class TestWriteFile(Challenge): + """Testing if LLM can write to a file""" + + @pytest.mark.parametrize( + "server_response", + [(data.task, data.mock_func)], + indirect=True, + ) + @pytest.mark.basic + def test_retrieval(self, workspace): + file = self.open_file(workspace, data.ground.files[0]) + + score = self.scoring(file, data.ground) + + print("You score is:", score) + + assert score diff --git a/agbenchmark/tests/basic_abilities/write_file_test.py b/agbenchmark/tests/basic_abilities/write_file_test.py deleted file mode 100644 index e69de29b..00000000 diff --git a/pyproject.toml b/pyproject.toml index 5498381a..6f79e75c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,7 +28,8 @@ testpaths = [ ] markers = [ "retrieval", - "regression" + "regression", + "basic" ] [tool.poetry.scripts] From 66c9e68b0430066d23e9acd66e5259ea5d5190d7 Mon Sep 17 00:00:00 2001 From: Silen Naihin Date: Sat, 24 Jun 2023 12:15:53 -0400 Subject: [PATCH 2/9] file creation from within file before server :) --- agbenchmark/conftest.py | 2 +- agbenchmark/mocks/tests/basic_mocks.py | 2 +- .../tests/basic_abilities/read_file/read_file_test.py | 8 ++++++++ agbenchmark/tests/regression/regression_tests.txt | 2 ++ 4 files changed, 12 insertions(+), 2 deletions(-) diff --git a/agbenchmark/conftest.py b/agbenchmark/conftest.py index 908d39e8..434f6dbd 100644 --- a/agbenchmark/conftest.py +++ b/agbenchmark/conftest.py @@ -17,7 +17,7 @@ def config(): return config -@pytest.fixture +@pytest.fixture(scope="module") def workspace(config): yield config["workspace"] # teardown after test function completes diff --git a/agbenchmark/mocks/tests/basic_mocks.py b/agbenchmark/mocks/tests/basic_mocks.py index eb7b9654..bbff6a9c 100644 --- a/agbenchmark/mocks/tests/basic_mocks.py +++ b/agbenchmark/mocks/tests/basic_mocks.py @@ -7,7 +7,7 @@ def basic_read_file_mock(task: str, workspace: str): This mock reads a file and returns its content. 
""" - Challenge.write_to_file(workspace, "file_to_check.txt", "this is how we're doing") + # Challenge.write_to_file(workspace, "file_to_check.txt", "this is how we're doing") file_contents = Challenge.open_file(workspace, "file_to_check.txt") diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py index 610ccdab..35d1d80c 100644 --- a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py +++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py @@ -8,6 +8,14 @@ data = ChallengeData.deserialize( ) +@pytest.fixture(scope="module", autouse=True) +def setup_module(workspace): + if data.ground.should_contain: + Challenge.write_to_file( + workspace, data.ground.files[0], "this is how we're doing" + ) + + class TestReadFile(Challenge): """Testing if LLM can read a file""" diff --git a/agbenchmark/tests/regression/regression_tests.txt b/agbenchmark/tests/regression/regression_tests.txt index e69de29b..a5f8fbd1 100644 --- a/agbenchmark/tests/regression/regression_tests.txt +++ b/agbenchmark/tests/regression/regression_tests.txt @@ -0,0 +1,2 @@ +agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_retrieval[server_response0] +agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_retrieval[server_response0] From 4fa9f72083aa09bf1770f10a3254c4d0ef674a9a Mon Sep 17 00:00:00 2001 From: Silen Naihin Date: Sat, 24 Jun 2023 12:24:17 -0400 Subject: [PATCH 3/9] adding dependencies on other challenges --- agbenchmark/mocks/tests/basic_mocks.py | 2 -- .../basic_abilities/read_file/read_file_test.py | 1 + .../basic_abilities/write_file/write_file_test.py | 1 + agbenchmark/tests/regression/regression_tests.txt | 1 - poetry.lock | 15 ++++++++++++++- pyproject.toml | 1 + 6 files changed, 17 insertions(+), 4 deletions(-) diff --git a/agbenchmark/mocks/tests/basic_mocks.py b/agbenchmark/mocks/tests/basic_mocks.py index bbff6a9c..550095b7 100644 --- a/agbenchmark/mocks/tests/basic_mocks.py +++ b/agbenchmark/mocks/tests/basic_mocks.py @@ -7,8 +7,6 @@ def basic_read_file_mock(task: str, workspace: str): This mock reads a file and returns its content. 
""" - # Challenge.write_to_file(workspace, "file_to_check.txt", "this is how we're doing") - file_contents = Challenge.open_file(workspace, "file_to_check.txt") Challenge.write_to_file( diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py index 35d1d80c..ea794281 100644 --- a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py +++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py @@ -25,6 +25,7 @@ class TestReadFile(Challenge): indirect=True, ) @pytest.mark.basic + @pytest.mark.dependency(depends=["write_file"]) def test_retrieval( self, workspace ): # create_file simply there for the function to depend on the fixture diff --git a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py index ccb10fe7..b2c559c9 100644 --- a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py +++ b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py @@ -17,6 +17,7 @@ class TestWriteFile(Challenge): indirect=True, ) @pytest.mark.basic + @pytest.mark.dependency(name="write_file") def test_retrieval(self, workspace): file = self.open_file(workspace, data.ground.files[0]) diff --git a/agbenchmark/tests/regression/regression_tests.txt b/agbenchmark/tests/regression/regression_tests.txt index a5f8fbd1..84e625af 100644 --- a/agbenchmark/tests/regression/regression_tests.txt +++ b/agbenchmark/tests/regression/regression_tests.txt @@ -1,2 +1 @@ -agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_retrieval[server_response0] agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_retrieval[server_response0] diff --git a/poetry.lock b/poetry.lock index 3f1059aa..3bc37622 100644 --- a/poetry.lock +++ b/poetry.lock @@ -595,6 +595,19 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} [package.extras] testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] +[[package]] +name = "pytest-dependency" +version = "0.5.1" +description = "Manage dependencies of tests" +optional = false +python-versions = "*" +files = [ + {file = "pytest-dependency-0.5.1.tar.gz", hash = "sha256:c2a892906192663f85030a6ab91304e508e546cddfe557d692d61ec57a1d946b"}, +] + +[package.dependencies] +pytest = ">=3.6.0" + [[package]] name = "requests" version = "2.31.0" @@ -765,4 +778,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "a13e69f2bd9e511e1af92ed02b155a90dec38a9b8d983a711e1b67931b467d38" +content-hash = "4a1629eb643b5b68d47f6d1407942aa6d4a796c6d5a1b6a54bbc096b9d0efa2d" diff --git a/pyproject.toml b/pyproject.toml index 6f79e75c..087ac844 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,6 +14,7 @@ click = "^8.1.3" requests = "^2.31.0" openai = "^0.27.8" pydantic = "^1.10.9" +pytest-dependency = "^0.5.1" [build-system] From f895d54e02c92e262172d9a773f7e6a4870d435d Mon Sep 17 00:00:00 2001 From: Silen Naihin Date: Sat, 24 Jun 2023 14:42:35 -0400 Subject: [PATCH 4/9] more elegant marking & dependency solution --- README.md | 74 +++++++++++++++++-- agbenchmark/challenges/README.md | 38 +++++----- agbenchmark/challenges/define_task_types.py | 1 + .../challenges/retrieval/r1/r1_data.json | 1 + .../tests/basic_abilities/BasicChallenge.py | 7 ++ .../read_file/r_file_data.json | 1 + .../read_file/read_file_test.py | 12 +-- 
.../write_file/w_file_data.json | 1 + .../write_file/write_file_test.py | 9 +-- .../tests/regression/regression_tests.txt | 2 + poetry.lock | 17 ++++- pyproject.toml | 1 + 12 files changed, 126 insertions(+), 38 deletions(-) create mode 100644 agbenchmark/tests/basic_abilities/BasicChallenge.py diff --git a/README.md b/README.md index 0a8d119a..0ad0cf34 100644 --- a/README.md +++ b/README.md @@ -51,15 +51,73 @@ Share your progress :) to create a test: -``` -@pytest.mark.parametrize( -"server_response", -["VARIABLE"], # VARIABLE = the query/goal you provide to the model -indirect=True, +```python +import pytest +from agbenchmark.challenges.define_task_types import ChallengeData +from ..CategoryChallenge import CategoryChallenge +import os + +data = ChallengeData.deserialize( + os.path.join(os.path.dirname(__file__), "r_file_data.json") ) -@pytest.mark.(VARIABLE) # VARIABLE = category of the test -def test_file_in_workspace(workspace): # VARIABLE = the actual test that asserts -assert os.path.exists(os.path.join(workspace, "file_to_check.txt")) + +class TestSomething(CategoryChallenge): + """Testing if LLM can read a file""" + + @pytest.mark.parametrize( + "server_response", + [(data.task, data.mock_func)], + indirect=True, + ) + def test_retrieval( + self, workspace + ): + # scoring logic goes here +``` + +All challenges will inherit from parent class which has the mark + +```python +@pytest.mark.basic +class BasicChallenge(Challenge): + pass +``` + +If you want to add a custom mark to a Challenge, you must specify it before the test definition + +```python +@pytest.mark.other_mark +def test_retrieval(self, workspace): +``` + +To add a dependency to a challenge use the following + +```python +# to defining what a test depends on +from pytest_dependency import depends + +def test1(self, request, workspace): + depends(request, data.dependencies) +# for defining a test as a dependency +@pytest.mark.dependency() +def test2 +``` + +Ordering of challenges needs to be used in combination with the above to make sure it executes afterwards + +```python +@pytest.mark.run(order=1) +``` + +To create a file to test a challenge, add this to the challenge file which will create a file before running the server + +```python +@pytest.fixture(scope="module", autouse=True) +def setup_module(workspace): + if data.ground.should_contain: + Challenge.write_to_file( + workspace, data.ground.files[0], "this is how we're doing" + ) ``` ## Api diff --git a/agbenchmark/challenges/README.md b/agbenchmark/challenges/README.md index 50efe2c4..d5229e93 100644 --- a/agbenchmark/challenges/README.md +++ b/agbenchmark/challenges/README.md @@ -4,28 +4,25 @@ Input: -- **category** (str): information-retrieval -- **difficulty**(str): the difficulty of this query. choices from - -## Information-retrieval challenges - -Input: - -- **category** (str): information-retrieval -- **task** (str): the question the agent needs to be solve. +- **category** (str[]): Category of the challenge such as 'retrieval', 'comprehension', etc. _this is not currently used. for the future it may be needed_ +- **task** (str): The task that the agent needs to solve. +- **dependencies** (str[]): The dependencies that the challenge needs to run. - **ground** (dict): The ground truth. 
- - **answer** (str): The raw text of ground truth answer - - **should_contain** (list): the exact strings that is required in the final answer - - **should_not_contain** (list): the exact strings that should not be in the final answer - - **files**: files that the are used for retrieval. Can specify file here or an extension **TODO:** like .txt -- **difficulty**(str): the difficulty of this query. choices from -- **mock_func**: function to mock the agent's response. This is used for testing purposes + - **answer** (str): The raw text of the ground truth answer. + - **should_contain** (list): The exact strings that are required in the final answer. + - **should_not_contain** (list): The exact strings that should not be in the final answer. + - **files** (list): Files that are used for retrieval. Can specify file here or an extension. +- **mock_func** (str): Function to mock the agent's response. This is used for testing purposes. +- **info** (dict): Additional info about the challenge. + - **difficulty** (str): The difficulty of this query. + - **description** (str): Description of the challenge. + - **side_effects** (str[]): Describes the effects of the challenge. Example: ```python { - "category": "retrieval", + "category": ["basic"], "task": "What is the capital of America?", "ground": { "answer": "Washington", @@ -33,11 +30,16 @@ Example: "should_not_contain": ["New York", "Los Angeles", "San Francisco"], "files": ["file_to_check.txt"] }, - "difficulty": "easy" + "mock_func": "write_file_mock", + "info": { + "difficulty": "easy", + "description": "Tests the writing to file", + "side_effects": ["tests if there is in fact an LLM attached"] + } } ``` -Output: +Current Output: - **score** (float): scores range from [0, 1] diff --git a/agbenchmark/challenges/define_task_types.py b/agbenchmark/challenges/define_task_types.py index 879a46af..69467121 100644 --- a/agbenchmark/challenges/define_task_types.py +++ b/agbenchmark/challenges/define_task_types.py @@ -20,6 +20,7 @@ class Ground(BaseModel): class ChallengeData(BaseModel): category: List[str] task: str + dependencies: List[str] ground: Ground mock_func: Optional[str] = None info: Info diff --git a/agbenchmark/challenges/retrieval/r1/r1_data.json b/agbenchmark/challenges/retrieval/r1/r1_data.json index 08b74d1b..fe05b6d5 100644 --- a/agbenchmark/challenges/retrieval/r1/r1_data.json +++ b/agbenchmark/challenges/retrieval/r1/r1_data.json @@ -1,5 +1,6 @@ { "category": ["basic"], + "dependencies": ["test_write_file"], "task": "What is the capital of America?", "ground": { "answer": "Washington", diff --git a/agbenchmark/tests/basic_abilities/BasicChallenge.py b/agbenchmark/tests/basic_abilities/BasicChallenge.py new file mode 100644 index 00000000..56320740 --- /dev/null +++ b/agbenchmark/tests/basic_abilities/BasicChallenge.py @@ -0,0 +1,7 @@ +import pytest +from agbenchmark.Challenge import Challenge + + +@pytest.mark.basic +class BasicChallenge(Challenge): + pass diff --git a/agbenchmark/tests/basic_abilities/read_file/r_file_data.json b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json index 55319ddf..8c5ef62d 100644 --- a/agbenchmark/tests/basic_abilities/read_file/r_file_data.json +++ b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json @@ -1,6 +1,7 @@ { "category": ["basic"], "task": "Write the string 'random string' before any existing text to the file called file_to_check.txt", + "dependencies": ["test_write_file"], "ground": { "answer": "random string: this is how we're doing", "should_contain": ["random 
string: this is how we're doing"], diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py index ea794281..03b2d6ca 100644 --- a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py +++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py @@ -1,7 +1,9 @@ import pytest from agbenchmark.challenges.define_task_types import ChallengeData from agbenchmark.Challenge import Challenge +from agbenchmark.tests.basic_abilities.BasicChallenge import BasicChallenge import os +from pytest_dependency import depends data = ChallengeData.deserialize( os.path.join(os.path.dirname(__file__), "r_file_data.json") @@ -16,7 +18,7 @@ def setup_module(workspace): ) -class TestReadFile(Challenge): +class TestReadFile(BasicChallenge): """Testing if LLM can read a file""" @pytest.mark.parametrize( @@ -24,11 +26,9 @@ class TestReadFile(Challenge): [(data.task, data.mock_func)], indirect=True, ) - @pytest.mark.basic - @pytest.mark.dependency(depends=["write_file"]) - def test_retrieval( - self, workspace - ): # create_file simply there for the function to depend on the fixture + def test_read_file(self, request, workspace): + depends(request, data.dependencies) + file = self.open_file(workspace, data.ground.files[0]) score = self.scoring(file, data.ground) diff --git a/agbenchmark/tests/basic_abilities/write_file/w_file_data.json b/agbenchmark/tests/basic_abilities/write_file/w_file_data.json index 4aaa1347..562d1c36 100644 --- a/agbenchmark/tests/basic_abilities/write_file/w_file_data.json +++ b/agbenchmark/tests/basic_abilities/write_file/w_file_data.json @@ -1,6 +1,7 @@ { "category": ["basic"], "task": "What is the capital of America?", + "dependencies": [], "ground": { "answer": "Washington", "should_contain": ["Washington"], diff --git a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py index b2c559c9..b09162e3 100644 --- a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py +++ b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py @@ -1,6 +1,6 @@ import pytest from agbenchmark.challenges.define_task_types import ChallengeData -from agbenchmark.Challenge import Challenge +from agbenchmark.tests.basic_abilities.BasicChallenge import BasicChallenge import os data = ChallengeData.deserialize( @@ -8,7 +8,7 @@ data = ChallengeData.deserialize( ) -class TestWriteFile(Challenge): +class TestWriteFile(BasicChallenge): """Testing if LLM can write to a file""" @pytest.mark.parametrize( @@ -16,9 +16,8 @@ class TestWriteFile(Challenge): [(data.task, data.mock_func)], indirect=True, ) - @pytest.mark.basic - @pytest.mark.dependency(name="write_file") - def test_retrieval(self, workspace): + @pytest.mark.dependency() + def test_write_file(self, workspace): file = self.open_file(workspace, data.ground.files[0]) score = self.scoring(file, data.ground) diff --git a/agbenchmark/tests/regression/regression_tests.txt b/agbenchmark/tests/regression/regression_tests.txt index 84e625af..b831003f 100644 --- a/agbenchmark/tests/regression/regression_tests.txt +++ b/agbenchmark/tests/regression/regression_tests.txt @@ -1 +1,3 @@ agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_retrieval[server_response0] +agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_retrieval[server_response0] 
+agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file[server_response0] diff --git a/poetry.lock b/poetry.lock index 3bc37622..f6f24c5f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -608,6 +608,21 @@ files = [ [package.dependencies] pytest = ">=3.6.0" +[[package]] +name = "pytest-ordering" +version = "0.6" +description = "pytest plugin to run your tests in a specific order" +optional = false +python-versions = "*" +files = [ + {file = "pytest-ordering-0.6.tar.gz", hash = "sha256:561ad653626bb171da78e682f6d39ac33bb13b3e272d406cd555adb6b006bda6"}, + {file = "pytest_ordering-0.6-py2-none-any.whl", hash = "sha256:27fba3fc265f5d0f8597e7557885662c1bdc1969497cd58aff6ed21c3b617de2"}, + {file = "pytest_ordering-0.6-py3-none-any.whl", hash = "sha256:3f314a178dbeb6777509548727dc69edf22d6d9a2867bf2d310ab85c403380b6"}, +] + +[package.dependencies] +pytest = "*" + [[package]] name = "requests" version = "2.31.0" @@ -778,4 +793,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "4a1629eb643b5b68d47f6d1407942aa6d4a796c6d5a1b6a54bbc096b9d0efa2d" +content-hash = "65b68e43440faafbd2883edd6b10bc177ab334380e908c27c9f511703065f8e7" diff --git a/pyproject.toml b/pyproject.toml index 087ac844..faee61c2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -15,6 +15,7 @@ requests = "^2.31.0" openai = "^0.27.8" pydantic = "^1.10.9" pytest-dependency = "^0.5.1" +pytest-ordering = "^0.6" [build-system] From d1c5e0a91a7a0f23b0e8de5f394204e96ec668cd Mon Sep 17 00:00:00 2001 From: Silen Naihin Date: Sun, 25 Jun 2023 00:22:53 -0400 Subject: [PATCH 5/9] finally figured out right way to do dependencies --- agbenchmark/challenges/retrieval/Retrieval.py | 2 ++ .../challenges/retrieval/r1/r1_data.json | 4 ++-- .../challenges/retrieval/r1/r1_test.py | 6 ++++-- .../tests/basic_abilities/BasicChallenge.py | 1 + .../read_file/r_file_data.json | 4 +++- .../read_file/read_file_test.py | 6 ++---- .../write_file/write_file_test.py | 1 - .../tests/regression/regression_tests.txt | 4 ++-- poetry.lock | 19 ++++++++++++++++++- pyproject.toml | 3 ++- 10 files changed, 36 insertions(+), 14 deletions(-) diff --git a/agbenchmark/challenges/retrieval/Retrieval.py b/agbenchmark/challenges/retrieval/Retrieval.py index 9434d69c..b8aa81ce 100644 --- a/agbenchmark/challenges/retrieval/Retrieval.py +++ b/agbenchmark/challenges/retrieval/Retrieval.py @@ -1,6 +1,8 @@ from agbenchmark.Challenge import Challenge +import pytest +@pytest.mark.retrieval class RetrievalChallenge(Challenge): """Challenge for information-retrieval""" diff --git a/agbenchmark/challenges/retrieval/r1/r1_data.json b/agbenchmark/challenges/retrieval/r1/r1_data.json index fe05b6d5..562d1c36 100644 --- a/agbenchmark/challenges/retrieval/r1/r1_data.json +++ b/agbenchmark/challenges/retrieval/r1/r1_data.json @@ -1,14 +1,14 @@ { "category": ["basic"], - "dependencies": ["test_write_file"], "task": "What is the capital of America?", + "dependencies": [], "ground": { "answer": "Washington", "should_contain": ["Washington"], "should_not_contain": ["New York", "Los Angeles", "San Francisco"], "files": ["file_to_check.txt"] }, - "mock_func": "write_file_mock", + "mock_func": "basic_write_file_mock", "info": { "difficulty": "easy", "description": "Tests the writing to file", diff --git a/agbenchmark/challenges/retrieval/r1/r1_test.py b/agbenchmark/challenges/retrieval/r1/r1_test.py index d37c5e79..5e6d6abf 100644 --- a/agbenchmark/challenges/retrieval/r1/r1_test.py +++ 
b/agbenchmark/challenges/retrieval/r1/r1_test.py @@ -2,6 +2,8 @@ import pytest from agbenchmark.challenges.retrieval.Retrieval import RetrievalChallenge from agbenchmark.challenges.define_task_types import ChallengeData, Ground import os +from pytest_dependency import depends + data = ChallengeData.deserialize( os.path.join(os.path.dirname(__file__), "r1_data.json") @@ -16,8 +18,8 @@ class TestRetrieval1(RetrievalChallenge): [(data.task, data.mock_func)], indirect=True, ) - @pytest.mark.retrieval - def test_retrieval(self, workspace): + def test_retrieval(self, request, workspace): + depends(request, data.dependencies) file = self.open_file(workspace, data.ground.files[0]) score = self.scoring(file, data.ground) diff --git a/agbenchmark/tests/basic_abilities/BasicChallenge.py b/agbenchmark/tests/basic_abilities/BasicChallenge.py index 56320740..0cada86c 100644 --- a/agbenchmark/tests/basic_abilities/BasicChallenge.py +++ b/agbenchmark/tests/basic_abilities/BasicChallenge.py @@ -2,6 +2,7 @@ import pytest from agbenchmark.Challenge import Challenge +@pytest.mark.run(order=1) @pytest.mark.basic class BasicChallenge(Challenge): pass diff --git a/agbenchmark/tests/basic_abilities/read_file/r_file_data.json b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json index 8c5ef62d..4d04f33e 100644 --- a/agbenchmark/tests/basic_abilities/read_file/r_file_data.json +++ b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json @@ -1,7 +1,9 @@ { "category": ["basic"], "task": "Write the string 'random string' before any existing text to the file called file_to_check.txt", - "dependencies": ["test_write_file"], + "dependencies": [ + "agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file" + ], "ground": { "answer": "random string: this is how we're doing", "should_contain": ["random string: this is how we're doing"], diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py index 03b2d6ca..ad08da4e 100644 --- a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py +++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py @@ -3,7 +3,6 @@ from agbenchmark.challenges.define_task_types import ChallengeData from agbenchmark.Challenge import Challenge from agbenchmark.tests.basic_abilities.BasicChallenge import BasicChallenge import os -from pytest_dependency import depends data = ChallengeData.deserialize( os.path.join(os.path.dirname(__file__), "r_file_data.json") @@ -26,9 +25,8 @@ class TestReadFile(BasicChallenge): [(data.task, data.mock_func)], indirect=True, ) - def test_read_file(self, request, workspace): - depends(request, data.dependencies) - + @pytest.mark.order(after=data.dependencies) + def test_read_file(self, workspace): file = self.open_file(workspace, data.ground.files[0]) score = self.scoring(file, data.ground) diff --git a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py index b09162e3..4c94320e 100644 --- a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py +++ b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py @@ -16,7 +16,6 @@ class TestWriteFile(BasicChallenge): [(data.task, data.mock_func)], indirect=True, ) - @pytest.mark.dependency() def test_write_file(self, workspace): file = self.open_file(workspace, data.ground.files[0]) diff --git a/agbenchmark/tests/regression/regression_tests.txt 
b/agbenchmark/tests/regression/regression_tests.txt index b831003f..df27f312 100644 --- a/agbenchmark/tests/regression/regression_tests.txt +++ b/agbenchmark/tests/regression/regression_tests.txt @@ -1,3 +1,3 @@ -agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_retrieval[server_response0] -agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_retrieval[server_response0] agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file[server_response0] +agbenchmark/challenges/retrieval/r1/r1_test.py::TestRetrieval1::test_retrieval[server_response0] +agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_read_file[server_response0] diff --git a/poetry.lock b/poetry.lock index f6f24c5f..4764bf49 100644 --- a/poetry.lock +++ b/poetry.lock @@ -608,6 +608,23 @@ files = [ [package.dependencies] pytest = ">=3.6.0" +[[package]] +name = "pytest-order" +version = "1.1.0" +description = "pytest plugin to run your tests in a specific order" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pytest-order-1.1.0.tar.gz", hash = "sha256:139d25b30826b78eebb42722f747eab14c44b88059d7a71d4f79d14a057269a5"}, + {file = "pytest_order-1.1.0-py3-none-any.whl", hash = "sha256:3b3730969c97900fa5cd31ecff80847680ed56b2490954565c14949ba60d9371"}, +] + +[package.dependencies] +pytest = [ + {version = ">=5.0", markers = "python_version < \"3.10\""}, + {version = ">=6.2.4", markers = "python_version >= \"3.10\""}, +] + [[package]] name = "pytest-ordering" version = "0.6" @@ -793,4 +810,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "65b68e43440faafbd2883edd6b10bc177ab334380e908c27c9f511703065f8e7" +content-hash = "64d22c864fe244497b7ebc81ead1be0b0570b14ee1ced323813d427672e17ff3" diff --git a/pyproject.toml b/pyproject.toml index faee61c2..fd2c5204 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,6 +16,7 @@ openai = "^0.27.8" pydantic = "^1.10.9" pytest-dependency = "^0.5.1" pytest-ordering = "^0.6" +pytest-order = "^1.1.0" [build-system] @@ -24,7 +25,7 @@ build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] minversion = "6.0" -addopts = "-ra -q" +addopts = "--order-dependencies" # -ra -q testpaths = [ "tests", "agbenchmark", ] From 31c11927199714516891db5aa3044eb1a4396eb4 Mon Sep 17 00:00:00 2001 From: Silen Naihin Date: Sun, 25 Jun 2023 08:48:16 -0400 Subject: [PATCH 6/9] other was non solution, solution is pytest-depends --- agbenchmark/challenges/README.md | 20 ++--- .../challenges/retrieval/r1/r1_test.py | 2 - .../tests/basic_abilities/BasicChallenge.py | 1 - .../read_file/r_file_data.json | 4 +- .../read_file/read_file_test.py | 2 +- .../write_file/write_file_test.py | 1 + .../tests/regression/regression_tests.txt | 2 +- poetry.lock | 80 ++++++++++--------- pyproject.toml | 6 +- 9 files changed, 59 insertions(+), 59 deletions(-) diff --git a/agbenchmark/challenges/README.md b/agbenchmark/challenges/README.md index d5229e93..e457b85c 100644 --- a/agbenchmark/challenges/README.md +++ b/agbenchmark/challenges/README.md @@ -6,7 +6,7 @@ Input: - **category** (str[]): Category of the challenge such as 'retrieval', 'comprehension', etc. _this is not currently used. for the future it may be needed_ - **task** (str): The task that the agent needs to solve. -- **dependencies** (str[]): The dependencies that the challenge needs to run. +- **dependencies** (str[]): The dependencies that the challenge needs to run. 
Needs to be the full node to the test function. - **ground** (dict): The ground truth. - **answer** (str): The raw text of the ground truth answer. - **should_contain** (list): The exact strings that are required in the final answer. @@ -23,18 +23,20 @@ Example: ```python { "category": ["basic"], - "task": "What is the capital of America?", + "task": "Write the string 'random string' before any existing text to the file called file_to_check.txt", + "dependencies": [ + "agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file" + ], "ground": { - "answer": "Washington", - "should_contain": ["Washington"], - "should_not_contain": ["New York", "Los Angeles", "San Francisco"], + "answer": "random string: this is how we're doing", + "should_contain": ["random string: this is how we're doing"], "files": ["file_to_check.txt"] }, - "mock_func": "write_file_mock", + "mock_func": "basic_read_file_mock", "info": { - "difficulty": "easy", - "description": "Tests the writing to file", - "side_effects": ["tests if there is in fact an LLM attached"] + "description": "This reads the file quickly", + "difficulty": "basic", + "side_effects": [""] } } diff --git a/agbenchmark/challenges/retrieval/r1/r1_test.py b/agbenchmark/challenges/retrieval/r1/r1_test.py index 5e6d6abf..45becaf7 100644 --- a/agbenchmark/challenges/retrieval/r1/r1_test.py +++ b/agbenchmark/challenges/retrieval/r1/r1_test.py @@ -2,7 +2,6 @@ import pytest from agbenchmark.challenges.retrieval.Retrieval import RetrievalChallenge from agbenchmark.challenges.define_task_types import ChallengeData, Ground import os -from pytest_dependency import depends data = ChallengeData.deserialize( @@ -19,7 +18,6 @@ class TestRetrieval1(RetrievalChallenge): indirect=True, ) def test_retrieval(self, request, workspace): - depends(request, data.dependencies) file = self.open_file(workspace, data.ground.files[0]) score = self.scoring(file, data.ground) diff --git a/agbenchmark/tests/basic_abilities/BasicChallenge.py b/agbenchmark/tests/basic_abilities/BasicChallenge.py index 0cada86c..56320740 100644 --- a/agbenchmark/tests/basic_abilities/BasicChallenge.py +++ b/agbenchmark/tests/basic_abilities/BasicChallenge.py @@ -2,7 +2,6 @@ import pytest from agbenchmark.Challenge import Challenge -@pytest.mark.run(order=1) @pytest.mark.basic class BasicChallenge(Challenge): pass diff --git a/agbenchmark/tests/basic_abilities/read_file/r_file_data.json b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json index 4d04f33e..8c5ef62d 100644 --- a/agbenchmark/tests/basic_abilities/read_file/r_file_data.json +++ b/agbenchmark/tests/basic_abilities/read_file/r_file_data.json @@ -1,9 +1,7 @@ { "category": ["basic"], "task": "Write the string 'random string' before any existing text to the file called file_to_check.txt", - "dependencies": [ - "agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file" - ], + "dependencies": ["test_write_file"], "ground": { "answer": "random string: this is how we're doing", "should_contain": ["random string: this is how we're doing"], diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py index ad08da4e..494a9b07 100644 --- a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py +++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py @@ -25,7 +25,7 @@ class TestReadFile(BasicChallenge): [(data.task, data.mock_func)], indirect=True, ) - 
@pytest.mark.order(after=data.dependencies) + @pytest.mark.depends(on=data.dependencies) def test_read_file(self, workspace): file = self.open_file(workspace, data.ground.files[0]) diff --git a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py index 4c94320e..0a4ef4a2 100644 --- a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py +++ b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py @@ -16,6 +16,7 @@ class TestWriteFile(BasicChallenge): [(data.task, data.mock_func)], indirect=True, ) + @pytest.mark.depends(name="test_write_file") def test_write_file(self, workspace): file = self.open_file(workspace, data.ground.files[0]) diff --git a/agbenchmark/tests/regression/regression_tests.txt b/agbenchmark/tests/regression/regression_tests.txt index df27f312..57b94cd7 100644 --- a/agbenchmark/tests/regression/regression_tests.txt +++ b/agbenchmark/tests/regression/regression_tests.txt @@ -1,3 +1,3 @@ -agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file[server_response0] agbenchmark/challenges/retrieval/r1/r1_test.py::TestRetrieval1::test_retrieval[server_response0] +agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file[server_response0] agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_read_file[server_response0] diff --git a/poetry.lock b/poetry.lock index 4764bf49..d7939fbf 100644 --- a/poetry.lock +++ b/poetry.lock @@ -368,6 +368,20 @@ files = [ {file = "frozenlist-1.3.3.tar.gz", hash = "sha256:58bcc55721e8a90b88332d6cd441261ebb22342e238296bb330968952fbb3a6a"}, ] +[[package]] +name = "future-fstrings" +version = "1.2.0" +description = "A backport of fstrings to python<3.6" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "future_fstrings-1.2.0-py2.py3-none-any.whl", hash = "sha256:90e49598b553d8746c4dc7d9442e0359d038c3039d802c91c0a55505da318c63"}, + {file = "future_fstrings-1.2.0.tar.gz", hash = "sha256:6cf41cbe97c398ab5a81168ce0dbb8ad95862d3caf23c21e4430627b90844089"}, +] + +[package.extras] +rewrite = ["tokenize-rt (>=3)"] + [[package]] name = "idna" version = "3.4" @@ -473,6 +487,24 @@ files = [ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, ] +[[package]] +name = "networkx" +version = "3.1" +description = "Python package for creating and manipulating graphs and networks" +optional = false +python-versions = ">=3.8" +files = [ + {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, + {file = "networkx-3.1.tar.gz", hash = "sha256:de346335408f84de0eada6ff9fafafff9bcda11f0a0dfaa931133debb146ab61"}, +] + +[package.extras] +default = ["matplotlib (>=3.4)", "numpy (>=1.20)", "pandas (>=1.3)", "scipy (>=1.8)"] +developer = ["mypy (>=1.1)", "pre-commit (>=3.2)"] +doc = ["nb2plots (>=0.6)", "numpydoc (>=1.5)", "pillow (>=9.4)", "pydata-sphinx-theme (>=0.13)", "sphinx (>=6.1)", "sphinx-gallery (>=0.12)", "texext (>=0.6.7)"] +extra = ["lxml (>=4.6)", "pydot (>=1.4.2)", "pygraphviz (>=1.10)", "sympy (>=1.10)"] +test = ["codecov (>=2.1)", "pytest (>=7.2)", "pytest-cov (>=4.0)"] + [[package]] name = "openai" version = "0.27.8" @@ -596,49 +628,21 @@ tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""} testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", 
"nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] [[package]] -name = "pytest-dependency" -version = "0.5.1" -description = "Manage dependencies of tests" +name = "pytest-depends" +version = "1.0.1" +description = "Tests that depend on other tests" optional = false python-versions = "*" files = [ - {file = "pytest-dependency-0.5.1.tar.gz", hash = "sha256:c2a892906192663f85030a6ab91304e508e546cddfe557d692d61ec57a1d946b"}, + {file = "pytest-depends-1.0.1.tar.gz", hash = "sha256:90a28e2b87b75b18abd128c94015248544acac20e4392e9921e5a86f93319dfe"}, + {file = "pytest_depends-1.0.1-py3-none-any.whl", hash = "sha256:a1df072bcc93d77aca3f0946903f5fed8af2d9b0056db1dfc9ed5ac164ab0642"}, ] [package.dependencies] -pytest = ">=3.6.0" - -[[package]] -name = "pytest-order" -version = "1.1.0" -description = "pytest plugin to run your tests in a specific order" -optional = false -python-versions = ">=3.6" -files = [ - {file = "pytest-order-1.1.0.tar.gz", hash = "sha256:139d25b30826b78eebb42722f747eab14c44b88059d7a71d4f79d14a057269a5"}, - {file = "pytest_order-1.1.0-py3-none-any.whl", hash = "sha256:3b3730969c97900fa5cd31ecff80847680ed56b2490954565c14949ba60d9371"}, -] - -[package.dependencies] -pytest = [ - {version = ">=5.0", markers = "python_version < \"3.10\""}, - {version = ">=6.2.4", markers = "python_version >= \"3.10\""}, -] - -[[package]] -name = "pytest-ordering" -version = "0.6" -description = "pytest plugin to run your tests in a specific order" -optional = false -python-versions = "*" -files = [ - {file = "pytest-ordering-0.6.tar.gz", hash = "sha256:561ad653626bb171da78e682f6d39ac33bb13b3e272d406cd555adb6b006bda6"}, - {file = "pytest_ordering-0.6-py2-none-any.whl", hash = "sha256:27fba3fc265f5d0f8597e7557885662c1bdc1969497cd58aff6ed21c3b617de2"}, - {file = "pytest_ordering-0.6-py3-none-any.whl", hash = "sha256:3f314a178dbeb6777509548727dc69edf22d6d9a2867bf2d310ab85c403380b6"}, -] - -[package.dependencies] -pytest = "*" +colorama = "*" +future-fstrings = "*" +networkx = "*" +pytest = ">=3" [[package]] name = "requests" @@ -810,4 +814,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = "^3.9" -content-hash = "64d22c864fe244497b7ebc81ead1be0b0570b14ee1ced323813d427672e17ff3" +content-hash = "a03dfa9938e062bdf564b7678df9dc9277c7c8e504f14f98084c5a2d497a8f7c" diff --git a/pyproject.toml b/pyproject.toml index fd2c5204..0a4f8ba7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,9 +14,7 @@ click = "^8.1.3" requests = "^2.31.0" openai = "^0.27.8" pydantic = "^1.10.9" -pytest-dependency = "^0.5.1" -pytest-ordering = "^0.6" -pytest-order = "^1.1.0" +pytest-depends = "^1.0.1" [build-system] @@ -25,7 +23,7 @@ build-backend = "poetry.core.masonry.api" [tool.pytest.ini_options] minversion = "6.0" -addopts = "--order-dependencies" # -ra -q +addopts = "-ra -q" testpaths = [ "tests", "agbenchmark", ] From adc6b225a6063bc2b0981f1156f25bde9279040e Mon Sep 17 00:00:00 2001 From: Silen Naihin Date: Sun, 25 Jun 2023 11:12:33 -0400 Subject: [PATCH 7/9] update regression tests info --- .../challenges/retrieval/r1/r1_test.py | 7 +++- agbenchmark/conftest.py | 36 +++++++++++++------ .../read_file/read_file_test.py | 5 +++ .../write_file/w_file_data.json | 2 +- .../write_file/write_file_test.py | 5 +++ .../tests/regression/RegressionManager.py | 25 ++++++++----- .../tests/regression/regression_tests.json | 1 + .../tests/regression/regression_tests.txt | 17 +++++++-- 8 files changed, 73 insertions(+), 25 deletions(-) create mode 100644 
agbenchmark/tests/regression/regression_tests.json diff --git a/agbenchmark/challenges/retrieval/r1/r1_test.py b/agbenchmark/challenges/retrieval/r1/r1_test.py index 45becaf7..489d298f 100644 --- a/agbenchmark/challenges/retrieval/r1/r1_test.py +++ b/agbenchmark/challenges/retrieval/r1/r1_test.py @@ -17,7 +17,12 @@ class TestRetrieval1(RetrievalChallenge): [(data.task, data.mock_func)], indirect=True, ) - def test_retrieval(self, request, workspace): + @pytest.mark.parametrize( + "regression_data", + [data], + indirect=True, + ) + def test_retrieval(self, workspace, current_challenge_data): file = self.open_file(workspace, data.ground.files[0]) score = self.scoring(file, data.ground) diff --git a/agbenchmark/conftest.py b/agbenchmark/conftest.py index 434f6dbd..78114c20 100644 --- a/agbenchmark/conftest.py +++ b/agbenchmark/conftest.py @@ -6,6 +6,7 @@ from agbenchmark.tests.regression.RegressionManager import RegressionManager import requests from requests.exceptions import RequestException from agbenchmark.mocks.MockManager import MockManager +from agbenchmark.challenges.define_task_types import ChallengeData @pytest.fixture(scope="module") @@ -64,21 +65,34 @@ def server_response(request, config): # print(f"Request succeeded with status code {response.status_code}") -regression_txt = "agbenchmark/tests/regression/regression_tests.txt" +regression_json = "agbenchmark/tests/regression/regression_tests.json" -regression_manager = RegressionManager(regression_txt) +regression_manager = RegressionManager(regression_json) + + +# this is to get the challenge_data from every test +@pytest.fixture(autouse=True) +def regression_data(request): + return request.param def pytest_runtest_makereport(item, call): - """Called for each test report. Generated for each stage - of a test run (setup, call, teardown).""" if call.when == "call": - if ( - call.excinfo is None - ): # if no error in the call stage, add it as a regression test - regression_manager.add_test(item.nodeid) - else: # otherwise, :( - regression_manager.remove_test(item.nodeid) + challenge_data = item.funcargs.get("regression_data", None) + difficulty = challenge_data.info.difficulty if challenge_data else "unknown" + dependencies = challenge_data.dependencies if challenge_data else [] + + test_details = { + "difficulty": difficulty, + "dependencies": dependencies, + "test": item.nodeid, + } + + print("pytest_runtest_makereport", test_details) + if call.excinfo is None: + regression_manager.add_test(item.nodeid.split("::")[1], test_details) + else: + regression_manager.remove_test(item.nodeid.split("::")[1]) def pytest_collection_modifyitems(items): @@ -86,7 +100,7 @@ def pytest_collection_modifyitems(items): to add regression marker to collected test items.""" for item in items: print("pytest_collection_modifyitems", item.nodeid) - if item.nodeid + "\n" in regression_manager.tests: + if item.nodeid.split("::")[1] in regression_manager.tests: print(regression_manager.tests) item.add_marker(pytest.mark.regression) diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py index 494a9b07..7d14228c 100644 --- a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py +++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py @@ -25,6 +25,11 @@ class TestReadFile(BasicChallenge): [(data.task, data.mock_func)], indirect=True, ) + @pytest.mark.parametrize( + "regression_data", + [data], + indirect=True, + ) @pytest.mark.depends(on=data.dependencies) def 
From 7604ae07bb6d79cfe8e5a28fdf3fa85c83603b1b Mon Sep 17 00:00:00 2001
From: Silen Naihin
Date: Sun, 25 Jun 2023 19:30:04 -0400
Subject: [PATCH 8/9] can now put file extensions or names in files data

---
 agbenchmark/Challenge.py                      | 22 ++++++++++++++++++-
 .../challenges/retrieval/r1/r1_test.py        | 12 +++++-----
 .../read_file/read_file_test.py               | 12 +++++-----
 .../write_file/w_file_data.json               |  2 +-
 .../write_file/write_file_test.py             | 12 +++++-----
 .../tests/regression/regression_tests.json    | 15 ++++++++++++-
 6 files changed, 57 insertions(+), 18 deletions(-)

diff --git a/agbenchmark/Challenge.py b/agbenchmark/Challenge.py
index 9828a0e9..d159296b 100644
--- a/agbenchmark/Challenge.py
+++ b/agbenchmark/Challenge.py
@@ -1,5 +1,5 @@
 import os
-from typing import Optional
+import glob
 from agbenchmark.challenges.define_task_types import Ground
 
 
@@ -14,6 +14,26 @@ class Challenge:
         with open(workspace_dir, "r") as f:
             return f.read()
 
+    @staticmethod
+    def open_files(workspace: str, file_patterns: list):
+        script_dir = os.path.abspath(workspace)
+        files_contents = []
+
+        for file_pattern in file_patterns:
+            # Check if it is a file extension
+            if file_pattern.startswith("."):
+                # Find all files with the given extension in the workspace
+                matching_files = glob.glob(os.path.join(script_dir, "*" + file_pattern))
+            else:
+                # Otherwise, it is a specific file
+                matching_files = [os.path.join(script_dir, file_pattern)]
+
+            for file_path in matching_files:
+                with open(file_path, "r") as f:
+                    files_contents.append(f.read())
+
+        return files_contents
+
     @staticmethod
     def write_to_file(workspace: str, filename: str, content: str):
         script_dir = os.path.abspath(workspace)
diff --git a/agbenchmark/challenges/retrieval/r1/r1_test.py b/agbenchmark/challenges/retrieval/r1/r1_test.py
index 489d298f..2a7d92a7 100644
--- a/agbenchmark/challenges/retrieval/r1/r1_test.py
+++ b/agbenchmark/challenges/retrieval/r1/r1_test.py
@@ -23,10 +23,12 @@ class TestRetrieval1(RetrievalChallenge):
         indirect=True,
     )
     def test_retrieval(self, workspace, regression_data):
-        file = self.open_file(workspace, data.ground.files[0])
+        files_contents = self.open_files(workspace, data.ground.files)
 
-        score = self.scoring(file, data.ground)
+        scores = []
+        for file_content in files_contents:
+            score = self.scoring(file_content, data.ground)
+            print("Your score is:", score)
+            scores.append(score)
 
-        print("You score is:", score)
-
-        assert score
+        assert 1 in scores
diff --git a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py
index 7d14228c..90946670 100644
--- a/agbenchmark/tests/basic_abilities/read_file/read_file_test.py
+++ b/agbenchmark/tests/basic_abilities/read_file/read_file_test.py
@@ -32,10 +32,12 @@ class TestReadFile(BasicChallenge):
     )
     @pytest.mark.depends(on=data.dependencies)
     def test_read_file(self, workspace):
-        file = self.open_file(workspace, data.ground.files[0])
+        files_contents = self.open_files(workspace, data.ground.files)
 
-        score = self.scoring(file, data.ground)
+        scores = []
+        for file_content in files_contents:
+            score = self.scoring(file_content, data.ground)
+            print("Your score is:", score)
+            scores.append(score)
 
-        print("You score is:", score)
-
-        assert score
+        assert 1 in scores
diff --git a/agbenchmark/tests/basic_abilities/write_file/w_file_data.json b/agbenchmark/tests/basic_abilities/write_file/w_file_data.json
index 1d262108..037c5bd8 100644
--- a/agbenchmark/tests/basic_abilities/write_file/w_file_data.json
+++ b/agbenchmark/tests/basic_abilities/write_file/w_file_data.json
@@ -6,7 +6,7 @@
         "answer": "Washington",
         "should_contain": ["Washington"],
         "should_not_contain": ["New York", "Los Angeles", "San Francisco"],
-        "files": ["file_to_check.txt"]
+        "files": [".txt"]
     },
     "mock_func": "basic_write_file_mock",
     "info": {
diff --git a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py
index 33012889..187378ff 100644
--- a/agbenchmark/tests/basic_abilities/write_file/write_file_test.py
+++ b/agbenchmark/tests/basic_abilities/write_file/write_file_test.py
@@ -23,10 +23,12 @@ class TestWriteFile(BasicChallenge):
     )
     @pytest.mark.depends(name="test_write_file")
     def test_write_file(self, workspace):
-        file = self.open_file(workspace, data.ground.files[0])
+        files_contents = self.open_files(workspace, data.ground.files)
 
-        score = self.scoring(file, data.ground)
+        scores = []
+        for file_content in files_contents:
+            score = self.scoring(file_content, data.ground)
+            print("Your score is:", score)
+            scores.append(score)
 
-        print("You score is:", score)
-
-        assert score
+        assert 1 in scores
diff --git a/agbenchmark/tests/regression/regression_tests.json b/agbenchmark/tests/regression/regression_tests.json
index 9e26dfee..c84fc9c9 100644
--- a/agbenchmark/tests/regression/regression_tests.json
+++ b/agbenchmark/tests/regression/regression_tests.json
@@ -1 +1,14 @@
-{}
\ No newline at end of file
+{
+    "TestWriteFile": {
+        "difficulty": "basic",
+        "dependencies": [],
+        "test": "agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file[regression_data0-server_response0]"
+    },
+    "TestReadFile": {
+        "difficulty": "basic",
+        "dependencies": [
+            "test_write_file"
+        ],
+        "test": "agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_read_file[regression_data0-server_response0]"
+    }
+}
\ No newline at end of file
From 4be22ae5abc884404370196bf71da86affe82131 Mon Sep 17 00:00:00 2001
From: Silen Naihin
Date: Mon, 26 Jun 2023 09:27:20 -0400
Subject: [PATCH 9/9] mini agi attempt

---
 agbenchmark/conftest.py                       | 44 +++++++++++--------
 .../tests/regression/regression_tests.json    | 15 +------
 agent/agbenchmark_run.py                      | 27 ++++++++++++
 agent/mini-agi                                |  1 +
 4 files changed, 55 insertions(+), 32 deletions(-)
 create mode 100644 agent/agbenchmark_run.py
 create mode 160000 agent/mini-agi

diff --git a/agbenchmark/conftest.py b/agbenchmark/conftest.py
index 78114c20..b3b69f19 100644
--- a/agbenchmark/conftest.py
+++ b/agbenchmark/conftest.py
@@ -7,6 +7,7 @@ import requests
 from requests.exceptions import RequestException
 from agbenchmark.mocks.MockManager import MockManager
 from agbenchmark.challenges.define_task_types import ChallengeData
+import subprocess
 
 
 @pytest.fixture(scope="module")
@@ -42,27 +43,34 @@ def server_response(request, config):
     else:
         task = request.param
         mock_function_name = None
-    # print(f"Server starting at {request.module}")
-    # try:
-    #     response = requests.post(
-    #         f"{config['hostname']}:{config['port']}", data={"task": task}
-    #     )
-    #     response.raise_for_status()  # This will raise an HTTPError if the status is 4xx or 5xx
-    # except RequestException:
-    #     # If an exception occurs (could be connection, timeout, or HTTP errors), we use the mock
-    if mock_function_name:
-        mock_manager = MockManager(
-            task
-        )  # workspace doesn't need to be passed in, stays the same
-        print("Server unavailable, using mock", mock_function_name)
-        mock_manager.delegate(mock_function_name)
-    else:
-        print("No mock provided")
+    # get the current file's directory
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+
+    # construct the script's path
+    script_path = os.path.join(current_dir, "..", "agent", "agbenchmark_run.py")
+
+    # form the command as an argument list
+    command = ["python", script_path, task]
+
+    # if mock_function_name:
+    #     mock_manager = MockManager(
+    #         task
+    #     )  # workspace doesn't need to be passed in, stays the same
+    #     print("Server unavailable, using mock", mock_function_name)
+    #     mock_manager.delegate(mock_function_name)
+    # else:
+    #     print("No mock provided")
+
+    try:
+        # run the command and wait for it to complete (no shell: command is a list)
+        result = subprocess.run(
+            command, check=True, text=True, capture_output=True
+        )
+        return result
+    except subprocess.CalledProcessError as e:
+        # raised when the subprocess returns a non-zero exit status
+        print(f"Subprocess failed with the following error:\n{e}")
 
 
 regression_json = "agbenchmark/tests/regression/regression_tests.json"
diff --git a/agbenchmark/tests/regression/regression_tests.json b/agbenchmark/tests/regression/regression_tests.json
index c84fc9c9..9e26dfee 100644
--- a/agbenchmark/tests/regression/regression_tests.json
+++ b/agbenchmark/tests/regression/regression_tests.json
@@ -1,14 +1 @@
-{
-    "TestWriteFile": {
-        "difficulty": "basic",
-        "dependencies": [],
-        "test": "agbenchmark/tests/basic_abilities/write_file/write_file_test.py::TestWriteFile::test_write_file[regression_data0-server_response0]"
-    },
-    "TestReadFile": {
-        "difficulty": "basic",
-        "dependencies": [
-            "test_write_file"
-        ],
-        "test": "agbenchmark/tests/basic_abilities/read_file/read_file_test.py::TestReadFile::test_read_file[regression_data0-server_response0]"
-    }
-}
\ No newline at end of file
+{}
\ No newline at end of file
diff --git a/agent/agbenchmark_run.py b/agent/agbenchmark_run.py
new file mode 100644
index 00000000..f509f5e6
--- /dev/null
+++ b/agent/agbenchmark_run.py
@@ -0,0 +1,27 @@
+import argparse
+import subprocess
+import os
+
+
+def main(objective):
+    # get the current directory
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+
+    # form the command as an argument list so a multi-word objective stays intact
+    command = [
+        "python", os.path.join(current_dir, "mini-agi", "miniagi.py"), objective
+    ]
+
+    # run the command
+    subprocess.run(command)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description="Run miniagi.py with an objective.")
+    parser.add_argument(
+        "objective", type=str, help="The objective to pass to miniagi.py"
+    )
+
+    args = parser.parse_args()
+
+    main(args.objective)
diff --git a/agent/mini-agi b/agent/mini-agi
new file mode 160000
index 00000000..d2add8f1
--- /dev/null
+++ b/agent/mini-agi
@@ -0,0 +1 @@
+Subproject commit d2add8f18caf96934a2d193583720cfc9b89451b
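With this last patch, the server_response fixture shells out to agent/agbenchmark_run.py, which in turn launches mini-agi. A sketch of driving the runner directly, the same way the fixture does; the objective string is made up for illustration, and mini-agi must be checked out under agent/ with its own configuration (e.g. an OpenAI API key) in place:

    import subprocess

    # any challenge task string works here; this one is illustrative
    objective = "Write the word 'Washington' to a .txt file"

    # argument-list form keeps the multi-word objective as a single argv entry
    result = subprocess.run(
        ["python", "agent/agbenchmark_run.py", objective],
        check=True,
        text=True,
        capture_output=True,
    )
    print(result.stdout)

Passing the command as a list (rather than a single string with shell=True) is what lets the task text travel through conftest.py and agbenchmark_run.py to miniagi.py without being split or interpreted by a shell.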