mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2025-12-17 22:14:28 +01:00
refactor(benchmark): Interface & type consolidation, and arch change, to allow adding challenge providers
Squashed commit of the following: commit 7d6476d3297860f74c276d571da995d958a8cc1a Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 18:10:45 2024 +0100 refactor(benchmark/challenge): Set up structure to support more challenge providers - Move `Challenge`, `ChallengeData`, `load_challenges` to `challenges/builtin.py` and rename to `BuiltinChallenge`, `BuiltinChallengeSpec`, `load_builtin_challenges` - Create `BaseChallenge` to serve as interface and base class for different challenge implementations - Create `ChallengeInfo` model to serve as universal challenge info object - Create `get_challenge_from_source_uri` function in `challenges/__init__.py` - Replace `ChallengeData` by `ChallengeInfo` everywhere except in `BuiltinChallenge` - Add strong typing to `task_informations` store in app.py - Use `call.duration` in `finalize_test_report` and remove `timer` fixture - Update docstring on `challenges/__init__.py:get_unique_categories` - Add docstring to `generate_test.py` commit 5df2aa7939b45d85a2c2b5de9ac0522330d1502a Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 16:58:01 2024 +0100 refactor(benchmark): Refactor & rename functions in agent_interface.py and agent_api_interface.py - `copy_artifacts_into_temp_folder` -> `copy_challenge_artifacts_into_workspace` - `copy_agent_artifacts_into_folder` -> `download_agent_artifacts_into_folder` - Reorder parameters of `run_api_agent`, `copy_challenge_artifacts_into_workspace`; use `Path` instead of `str` commit 6a256fef4c7950b7ee82fb801e70c83afe6b6f8b Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 16:02:25 2024 +0100 refactor(benchmark): Refactor & typefix report generation and handling logic - Rename functions in reports.py and ReportManager.py to better reflect what they do - `get_previous_test_results` -> `get_and_update_success_history` - `generate_single_call_report` -> `initialize_test_report` - `finalize_reports` -> `finalize_test_report` - `ReportManager.end_info_report` -> 
`SessionReportManager.finalize_session_report` - Modify `pytest_runtest_makereport` hook in conftest.py to finalize the report immediately after the challenge finishes running instead of after teardown - Move result processing logic from `initialize_test_report` to `finalize_test_report` in reports.py - Use `Test` and `Report` types from report_types.py where possible instead of untyped dicts: reports.py, utils.py, ReportManager.py - Differentiate `ReportManager` into `SessionReportManager`, `RegressionTestsTracker`, `SuccessRateTracker` - Move filtering of optional challenge categories from challenge.py (`Challenge.skip_optional_categories`) to conftest.py (`pytest_collection_modifyitems`) - Remove unused `scores` fixture in conftest.py commit 370d6dbf5df75d78e3878877968e8cd309d6d7fb Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 15:16:43 2024 +0100 refactor(benchmark): Simplify models in report_types.py - Removed ForbidOptionalMeta and BaseModelBenchmark classes. - Changed model attributes to optional: `Metrics.difficulty`, `Metrics.success`, `Metrics.success_percentage`, `Metrics.run_time`, and `Test.reached_cutoff`. - Added validator to `Metrics` model to require `success` and `run_time` fields if `attempted=True`. - Added default values to all optional model fields. - Removed duplicate imports. - Added condition in process_report.py to prevent null lookups if `metrics.difficulty` is not set.
This commit is contained in:
@@ -1,75 +1,24 @@
|
||||
import glob
|
||||
"""
|
||||
AGBenchmark's test discovery endpoint for Pytest.
|
||||
|
||||
This module is picked up by Pytest's *_test.py file matching pattern, and all challenge
|
||||
classes in the module that conform to the `Test*` pattern are collected.
|
||||
"""
|
||||
|
||||
import importlib
|
||||
import logging
|
||||
import os
|
||||
from collections import deque
|
||||
from pathlib import Path
|
||||
|
||||
from agbenchmark.utils.challenge import Challenge
|
||||
from agbenchmark.utils.data_types import ChallengeData
|
||||
|
||||
DATA_CATEGORY = {}
|
||||
from agbenchmark.challenges.builtin import load_builtin_challenges
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
DATA_CATEGORY = {}
|
||||
|
||||
def create_challenge_from_spec_file(spec_file: Path) -> type[Challenge]:
|
||||
challenge = Challenge.from_challenge_spec(spec_file)
|
||||
DATA_CATEGORY[challenge.data.name] = challenge.data.category[0].value
|
||||
return challenge
|
||||
|
||||
|
||||
def create_challenge_from_spec_file_path(spec_file_path: str) -> type[Challenge]:
|
||||
spec_file = Path(spec_file_path).resolve()
|
||||
return create_challenge_from_spec_file(spec_file)
|
||||
|
||||
|
||||
def load_challenges() -> None:
|
||||
logger.info("Loading challenges...")
|
||||
|
||||
challenges_path = os.path.join(os.path.dirname(__file__), "challenges")
|
||||
logger.debug(f"Looking for challenges in {challenges_path}...")
|
||||
|
||||
json_files = deque(
|
||||
glob.glob(
|
||||
f"{challenges_path}/**/data.json",
|
||||
recursive=True,
|
||||
)
|
||||
)
|
||||
|
||||
logger.debug(f"Found {len(json_files)} challenges.")
|
||||
logger.debug(f"Sample path: {json_files[0]}")
|
||||
|
||||
loaded, ignored = 0, 0
|
||||
while json_files:
|
||||
# Take and remove the first element from json_files
|
||||
json_file = json_files.popleft()
|
||||
if challenge_should_be_ignored(json_file):
|
||||
ignored += 1
|
||||
continue
|
||||
|
||||
challenge_info = ChallengeData.parse_file(json_file)
|
||||
|
||||
challenge_class = create_challenge_from_spec_file_path(json_file)
|
||||
|
||||
logger.debug(f"Generated test for {challenge_info.name}")
|
||||
_add_challenge_to_module(challenge_class)
|
||||
loaded += 1
|
||||
|
||||
logger.info(f"Loading challenges complete: loaded {loaded}, ignored {ignored}.")
|
||||
|
||||
|
||||
def challenge_should_be_ignored(json_file_path: str):
|
||||
return (
|
||||
"challenges/deprecated" in json_file_path
|
||||
or "challenges/library" in json_file_path
|
||||
)
|
||||
|
||||
|
||||
def _add_challenge_to_module(challenge: type[Challenge]):
|
||||
# Load challenges and attach them to this module
|
||||
for challenge in load_builtin_challenges():
|
||||
# Attach the Challenge class to this module so it can be discovered by pytest
|
||||
module = importlib.import_module(__name__)
|
||||
setattr(module, f"{challenge.__name__}", challenge)
|
||||
setattr(module, challenge.__name__, challenge)
|
||||
|
||||
|
||||
load_challenges()
|
||||
# Build a map of challenge names and their primary category
|
||||
DATA_CATEGORY[challenge.info.name] = challenge.info.category[0].value
|
||||
|
||||
Reference in New Issue
Block a user