mirror of
https://github.com/aljazceru/Auto-GPT.git
synced 2025-12-18 06:24:20 +01:00
Squashed commit of the following: commit 7d6476d3297860f74c276d571da995d958a8cc1a Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 18:10:45 2024 +0100 refactor(benchmark/challenge): Set up structure to support more challenge providers - Move `Challenge`, `ChallengeData`, `load_challenges` to `challenges/builtin.py` and rename to `BuiltinChallenge`, `BuiltinChallengeSpec`, `load_builtin_challenges` - Create `BaseChallenge` to serve as interface and base class for different challenge implementations - Create `ChallengeInfo` model to serve as universal challenge info object - Create `get_challenge_from_source_uri` function in `challenges/__init__.py` - Replace `ChallengeData` by `ChallengeInfo` everywhere except in `BuiltinChallenge` - Add strong typing to `task_informations` store in app.py - Use `call.duration` in `finalize_test_report` and remove `timer` fixture - Update docstring on `challenges/__init__.py:get_unique_categories` - Add docstring to `generate_test.py` commit 5df2aa7939b45d85a2c2b5de9ac0522330d1502a Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 16:58:01 2024 +0100 refactor(benchmark): Refactor & rename functions in agent_interface.py and agent_api_interface.py - `copy_artifacts_into_temp_folder` -> `copy_challenge_artifacts_into_workspace` - `copy_agent_artifacts_into_folder` -> `download_agent_artifacts_into_folder` - Reorder parameters of `run_api_agent`, `copy_challenge_artifacts_into_workspace`; use `Path` instead of `str` commit 6a256fef4c7950b7ee82fb801e70c83afe6b6f8b Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 16:02:25 2024 +0100 refactor(benchmark): Refactor & typefix report generation and handling logic - Rename functions in reports.py and ReportManager.py to better reflect what they do - `get_previous_test_results` -> `get_and_update_success_history` - `generate_single_call_report` -> `initialize_test_report` - `finalize_reports` -> `finalize_test_report` - `ReportManager.end_info_report` -> 
`SessionReportManager.finalize_session_report` - Modify `pytest_runtest_makereport` hook in conftest.py to finalize the report immediately after the challenge finishes running instead of after teardown - Move result processing logic from `initialize_test_report` to `finalize_test_report` in reports.py - Use `Test` and `Report` types from report_types.py where possible instead of untyped dicts: reports.py, utils.py, ReportManager.py - Differentiate `ReportManager` into `SessionReportManager`, `RegressionTestsTracker`, `SuccessRateTracker` - Move filtering of optional challenge categories from challenge.py (`Challenge.skip_optional_categories`) to conftest.py (`pytest_collection_modifyitems`) - Remove unused `scores` fixture in conftest.py commit 370d6dbf5df75d78e3878877968e8cd309d6d7fb Author: Reinier van der Leer <pwuts@agpt.co> Date: Tue Jan 9 15:16:43 2024 +0100 refactor(benchmark): Simplify models in report_types.py - Removed ForbidOptionalMeta and BaseModelBenchmark classes. - Changed model attributes to optional: `Metrics.difficulty`, `Metrics.success`, `Metrics.success_percentage`, `Metrics.run_time`, and `Test.reached_cutoff`. - Added validator to `Metrics` model to require `success` and `run_time` fields if `attempted=True`. - Added default values to all optional model fields. - Removed duplicate imports. - Added condition in process_report.py to prevent null lookups if `metrics.difficulty` is not set.
120 lines
4.0 KiB
Python
120 lines
4.0 KiB
Python
import json
|
|
import sys
|
|
from datetime import datetime
|
|
from pathlib import Path
|
|
from typing import Optional
|
|
|
|
from pydantic import BaseSettings, Field
|
|
|
|
|
|
def _calculate_info_test_path(base_path: Path, benchmark_start_time: datetime) -> Path:
|
|
"""
|
|
Calculates the path to the directory where the test report will be saved.
|
|
"""
|
|
# Ensure the reports path exists
|
|
base_path.mkdir(parents=True, exist_ok=True)
|
|
|
|
# Get current UTC date-time stamp
|
|
date_stamp = benchmark_start_time.strftime("%Y%m%dT%H%M%S")
|
|
|
|
# Default run name
|
|
run_name = "full_run"
|
|
|
|
# Map command-line arguments to their respective labels
|
|
arg_labels = {
|
|
"--test": None,
|
|
"--category": None,
|
|
"--maintain": "maintain",
|
|
"--improve": "improve",
|
|
"--explore": "explore",
|
|
}
|
|
|
|
# Identify the relevant command-line argument
|
|
for arg, label in arg_labels.items():
|
|
if arg in sys.argv:
|
|
test_arg = sys.argv[sys.argv.index(arg) + 1] if label is None else None
|
|
run_name = arg.strip("--")
|
|
if test_arg:
|
|
run_name = f"{run_name}_{test_arg}"
|
|
break
|
|
|
|
# Create the full new directory path with ISO standard UTC date-time stamp
|
|
report_path = base_path / f"{date_stamp}_{run_name}"
|
|
|
|
# Ensure the new directory is created
|
|
# FIXME: this is not a desirable side-effect of loading the config
|
|
report_path.mkdir(exist_ok=True)
|
|
|
|
return report_path
|
|
|
|
|
|
class AgentBenchmarkConfig(BaseSettings, extra="allow"):
    """
    Configuration model and loader for the AGBenchmark.

    Projects that want to use AGBenchmark should contain an agbenchmark_config folder
    with a config.json file that - at minimum - specifies the `host` at which the
    subject application exposes an Agent Protocol compliant API.
    """

    agbenchmark_config_dir: Path = Field(..., exclude=True)
    """Path to the agbenchmark_config folder of the subject agent application."""

    categories: list[str] | None = None
    """Categories to benchmark the agent for. If omitted, all categories are assumed."""

    host: str
    """Host (scheme://address:port) of the subject agent application."""

    @classmethod
    def load(cls, config_dir: Optional[Path] = None) -> "AgentBenchmarkConfig":
        """
        Load the configuration from `config_dir`/config.json.

        If `config_dir` is omitted, it is located with `find_config_folder()`.

        Raises:
            FileNotFoundError: if no config folder/file can be found.
            json.JSONDecodeError: if config.json contains invalid JSON.
        """
        config_dir = config_dir or cls.find_config_folder()
        with (config_dir / "config.json").open("r") as f:
            return cls(
                agbenchmark_config_dir=config_dir,
                **json.load(f),
            )

    @staticmethod
    def find_config_folder(for_dir: Optional[Path] = None) -> Path:
        """
        Find the closest ancestor folder containing an agbenchmark_config folder,
        and returns the path of that agbenchmark_config folder.

        Params:
            for_dir: The directory to start searching from; defaults to the
                current working directory (determined at call time).

        Raises:
            FileNotFoundError: if no agbenchmark_config folder containing a
                config.json is found in `for_dir` or any of its ancestors.
        """
        # FIX: default was `for_dir: Path = Path.cwd()`, which froze the cwd at
        # import time; resolve it at call time instead.
        for_dir = for_dir or Path.cwd()
        # FIX: iterate the directory and its ancestors via `.parents`.
        # The previous `while current_directory != Path("/")` loop never
        # terminated on Windows (root is e.g. `C:\`) or for relative paths
        # (`Path(".").parent == Path(".")`).
        for candidate_dir in (for_dir, *for_dir.parents):
            if (path := candidate_dir / "agbenchmark_config").exists():
                if (path / "config.json").is_file():
                    return path
        raise FileNotFoundError(
            "No 'agbenchmark_config' directory found in the path hierarchy."
        )

    @property
    def config_file(self) -> Path:
        """Path of the loaded config.json file."""
        return self.agbenchmark_config_dir / "config.json"

    @property
    def reports_folder(self) -> Path:
        """Folder in which benchmark run reports are stored."""
        return self.agbenchmark_config_dir / "reports"

    def get_report_dir(self, benchmark_start_time: datetime) -> Path:
        """Get (and create) the report directory for a run started at `benchmark_start_time`."""
        return _calculate_info_test_path(self.reports_folder, benchmark_start_time)

    @property
    def regression_tests_file(self) -> Path:
        """Path of the regression test tracking file."""
        return self.reports_folder / "regression_tests.json"

    @property
    def success_rate_file(self) -> Path:
        """Path of the success rate tracking file."""
        return self.reports_folder / "success_rate.json"

    @property
    def challenges_already_beaten_file(self) -> Path:
        """Path of the file tracking which challenges have been beaten."""
        return self.agbenchmark_config_dir / "challenges_already_beaten.json"

    @property
    def temp_folder(self) -> Path:
        """Path of the agent's temp/workspace folder for challenge artifacts."""
        return self.agbenchmark_config_dir / "temp_folder"
|