Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-21 07:54:21 +01:00)
Squashed commit of the following:

commit 7d6476d3297860f74c276d571da995d958a8cc1a
Author: Reinier van der Leer <pwuts@agpt.co>
Date: Tue Jan 9 18:10:45 2024 +0100

refactor(benchmark/challenge): Set up structure to support more challenge providers

- Move `Challenge`, `ChallengeData`, `load_challenges` to `challenges/builtin.py` and rename to `BuiltinChallenge`, `BuiltinChallengeSpec`, `load_builtin_challenges`
- Create `BaseChallenge` to serve as interface and base class for different challenge implementations
- Create `ChallengeInfo` model to serve as universal challenge info object
- Create `get_challenge_from_source_uri` function in `challenges/__init__.py`
- Replace `ChallengeData` by `ChallengeInfo` everywhere except in `BuiltinChallenge`
- Add strong typing to `task_informations` store in app.py
- Use `call.duration` in `finalize_test_report` and remove `timer` fixture
- Update docstring on `challenges/__init__.py:get_unique_categories`
- Add docstring to `generate_test.py`

commit 5df2aa7939b45d85a2c2b5de9ac0522330d1502a
Author: Reinier van der Leer <pwuts@agpt.co>
Date: Tue Jan 9 16:58:01 2024 +0100

refactor(benchmark): Refactor & rename functions in agent_interface.py and agent_api_interface.py

- `copy_artifacts_into_temp_folder` -> `copy_challenge_artifacts_into_workspace`
- `copy_agent_artifacts_into_folder` -> `download_agent_artifacts_into_folder`
- Reorder parameters of `run_api_agent`, `copy_challenge_artifacts_into_workspace`; use `Path` instead of `str`

commit 6a256fef4c7950b7ee82fb801e70c83afe6b6f8b
Author: Reinier van der Leer <pwuts@agpt.co>
Date: Tue Jan 9 16:02:25 2024 +0100

refactor(benchmark): Refactor & typefix report generation and handling logic

- Rename functions in reports.py and ReportManager.py to better reflect what they do
  - `get_previous_test_results` -> `get_and_update_success_history`
  - `generate_single_call_report` -> `initialize_test_report`
  - `finalize_reports` -> `finalize_test_report`
  - `ReportManager.end_info_report` -> `SessionReportManager.finalize_session_report`
- Modify `pytest_runtest_makereport` hook in conftest.py to finalize the report immediately after the challenge finishes running instead of after teardown
- Move result processing logic from `initialize_test_report` to `finalize_test_report` in reports.py
- Use `Test` and `Report` types from report_types.py where possible instead of untyped dicts: reports.py, utils.py, ReportManager.py
- Differentiate `ReportManager` into `SessionReportManager`, `RegressionTestsTracker`, `SuccessRateTracker`
- Move filtering of optional challenge categories from challenge.py (`Challenge.skip_optional_categories`) to conftest.py (`pytest_collection_modifyitems`)
- Remove unused `scores` fixture in conftest.py

commit 370d6dbf5df75d78e3878877968e8cd309d6d7fb
Author: Reinier van der Leer <pwuts@agpt.co>
Date: Tue Jan 9 15:16:43 2024 +0100

refactor(benchmark): Simplify models in report_types.py

- Removed `ForbidOptionalMeta` and `BaseModelBenchmark` classes.
- Changed model attributes to optional: `Metrics.difficulty`, `Metrics.success`, `Metrics.success_percentage`, `Metrics.run_time`, and `Test.reached_cutoff`.
- Added validator to `Metrics` model to require `success` and `run_time` fields if `attempted=True`.
- Added default values to all optional model fields.
- Removed duplicate imports.
- Added condition in process_report.py to prevent null lookups if `metrics.difficulty` is not set.
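One piece introduced by the first commit above, `get_challenge_from_source_uri` in `challenges/__init__.py`, is the counterpart of the `BaseChallenge.from_source_uri` classmethod defined in the file below. The following is only a rough, hypothetical sketch of such a dispatcher; the provider-prefix convention, the `"__BuiltinChallenge__"` string, and the error handling are illustrative assumptions, not the repository's actual code.

# Hypothetical sketch of a source-URI dispatcher (not the repository's code).
from agbenchmark.challenges.builtin import BuiltinChallenge


def get_challenge_from_source_uri(source_uri: str):
    # Assumption: the segment before the first "/" identifies the challenge provider.
    provider_prefix = source_uri.split("/", 1)[0]
    if provider_prefix == "__BuiltinChallenge__":
        # Delegate to the provider's own from_source_uri to build the challenge class.
        return BuiltinChallenge.from_source_uri(source_uri)
    raise ValueError(f"Cannot resolve provider for source_uri: {source_uri}")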
100 lines · 3.0 KiB · Python
import logging
from abc import ABC, abstractmethod
from pathlib import Path
from typing import AsyncIterator, ClassVar, Optional

import pytest
from agent_protocol_client import AgentApi, Step
from colorama import Fore, Style
from pydantic import BaseModel, Field

from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.utils.data_types import Category, DifficultyLevel, EvalResult

logger = logging.getLogger(__name__)


class ChallengeInfo(BaseModel):
    eval_id: str = ""
    name: str
    task: str
    task_artifacts_dir: Optional[Path] = None
    category: list[Category]
    difficulty: Optional[DifficultyLevel] = None
    description: Optional[str] = None
    dependencies: list[str] = Field(default_factory=list)
    reference_answer: Optional[str]

    source_uri: str
    """Internal reference indicating the source of the challenge specification"""


class BaseChallenge(ABC):
    """
    The base class and shared interface for all specific challenge implementations.
    """

    info: ClassVar[ChallengeInfo]

    @classmethod
    @abstractmethod
    def from_source_uri(cls, source_uri: str) -> type["BaseChallenge"]:
        """
        Construct an individual challenge subclass from a suitable `source_uri` (as in
        `ChallengeInfo.source_uri`).
        """
        ...

    @abstractmethod
    def test_method(
        self, config: AgentBenchmarkConfig, request: pytest.FixtureRequest
    ) -> None:
        """
        Test method for use by Pytest-based benchmark sessions. Should return normally
        if the challenge passes, and raise a (preferably descriptive) error otherwise.
        """
        ...

    @classmethod
    async def run_challenge(
        cls, config: AgentBenchmarkConfig, timeout: int
    ) -> AsyncIterator[Step]:
        """
        Runs the challenge on the subject agent with the specified timeout.
        Also prints basic challenge and status info to STDOUT.

        Params:
            config: The subject agent's benchmark config.
            timeout: Timeout (seconds) after which to stop the run if not finished.

        Yields:
            Step: The steps generated by the agent for the challenge task.
        """
        # avoid circular import
        from agbenchmark.agent_api_interface import run_api_agent

        print()
        print(
            f"{Fore.MAGENTA + Style.BRIGHT}{'='*24} "
            f"Starting {cls.info.name} challenge"
            f" {'='*24}{Style.RESET_ALL}"
        )
        print(f"{Fore.CYAN}Timeout:{Fore.RESET} {timeout} seconds")
        print(f"{Fore.CYAN}Task:{Fore.RESET} {cls.info.task}")

        print()
        logger.debug(f"Starting {cls.info.name} challenge run")
        i = 0
        async for step in run_api_agent(cls.info.task, config, timeout):
            i += 1
            print(f"[{cls.info.name}] - step {step.name} ({i}. request)")
            yield step
        logger.debug(f"Finished {cls.info.name} challenge run")

    @classmethod
    @abstractmethod
    async def evaluate_task_state(
        cls, agent: AgentApi, task_id: str
    ) -> list[EvalResult]:
        ...
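A challenge provider implements the three abstract members above; `BuiltinChallenge` in `challenges/builtin.py` (see the commit log at the top) is the real implementation. Purely to illustrate the intended usage of the interface, a minimal hypothetical provider is sketched below; the class name, task text, category member, timeout value, and the use of `asyncio.run` to drive `run_challenge` from the synchronous `test_method` are all assumptions, not code from the repository.

# Hypothetical example provider; not part of the repository's code.
import asyncio


class ExampleChallenge(BaseChallenge):
    info = ChallengeInfo(
        name="TestExampleChallenge",  # illustrative values throughout
        task="Write the word 'hello' to a file called hello.txt",
        category=[Category.GENERALIST],  # assumed Category member
        reference_answer=None,
        source_uri="__ExampleChallenge__/hello",
    )

    @classmethod
    def from_source_uri(cls, source_uri: str) -> type["BaseChallenge"]:
        # A real provider would build or look up a challenge class from its own
        # challenge specs; this example only knows about itself.
        if source_uri != cls.info.source_uri:
            raise ValueError(f"Unknown source_uri: {source_uri}")
        return cls

    def test_method(
        self, config: AgentBenchmarkConfig, request: pytest.FixtureRequest
    ) -> None:
        # Drive the async challenge run from the synchronous pytest test and
        # fail loudly if the agent produced no steps at all.
        async def collect_steps() -> list[Step]:
            return [step async for step in self.run_challenge(config, timeout=60)]

        steps = asyncio.run(collect_steps())
        assert steps, "The agent did not generate any steps for the task"

    @classmethod
    async def evaluate_task_state(
        cls, agent: AgentApi, task_id: str
    ) -> list[EvalResult]:
        # A real provider would fetch the task's output/artifacts through `agent`
        # and score them here; this stub reports no evaluation results.
        return []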