Mirror of https://github.com/aljazceru/Auto-GPT.git, synced 2026-01-31 11:54:30 +01:00

Squashed commit of the following:

commit 7d6476d3297860f74c276d571da995d958a8cc1a
Author: Reinier van der Leer <pwuts@agpt.co>
Date:   Tue Jan 9 18:10:45 2024 +0100

    refactor(benchmark/challenge): Set up structure to support more challenge providers

    - Move `Challenge`, `ChallengeData`, `load_challenges` to `challenges/builtin.py` and rename to `BuiltinChallenge`, `BuiltinChallengeSpec`, `load_builtin_challenges`
    - Create `BaseChallenge` to serve as interface and base class for different challenge implementations
    - Create `ChallengeInfo` model to serve as universal challenge info object
    - Create `get_challenge_from_source_uri` function in `challenges/__init__.py`
    - Replace `ChallengeData` by `ChallengeInfo` everywhere except in `BuiltinChallenge`
    - Add strong typing to `task_informations` store in app.py
    - Use `call.duration` in `finalize_test_report` and remove `timer` fixture
    - Update docstring on `challenges/__init__.py:get_unique_categories`
    - Add docstring to `generate_test.py`

commit 5df2aa7939b45d85a2c2b5de9ac0522330d1502a
Author: Reinier van der Leer <pwuts@agpt.co>
Date:   Tue Jan 9 16:58:01 2024 +0100

    refactor(benchmark): Refactor & rename functions in agent_interface.py and agent_api_interface.py

    - `copy_artifacts_into_temp_folder` -> `copy_challenge_artifacts_into_workspace`
    - `copy_agent_artifacts_into_folder` -> `download_agent_artifacts_into_folder`
    - Reorder parameters of `run_api_agent`, `copy_challenge_artifacts_into_workspace`; use `Path` instead of `str`

commit 6a256fef4c7950b7ee82fb801e70c83afe6b6f8b
Author: Reinier van der Leer <pwuts@agpt.co>
Date:   Tue Jan 9 16:02:25 2024 +0100

    refactor(benchmark): Refactor & typefix report generation and handling logic

    - Rename functions in reports.py and ReportManager.py to better reflect what they do
      - `get_previous_test_results` -> `get_and_update_success_history`
      - `generate_single_call_report` -> `initialize_test_report`
      - `finalize_reports` -> `finalize_test_report`
      - `ReportManager.end_info_report` -> `SessionReportManager.finalize_session_report`
    - Modify `pytest_runtest_makereport` hook in conftest.py to finalize the report immediately after the challenge finishes running instead of after teardown
    - Move result processing logic from `initialize_test_report` to `finalize_test_report` in reports.py
    - Use `Test` and `Report` types from report_types.py where possible instead of untyped dicts: reports.py, utils.py, ReportManager.py
    - Differentiate `ReportManager` into `SessionReportManager`, `RegressionTestsTracker`, `SuccessRateTracker`
    - Move filtering of optional challenge categories from challenge.py (`Challenge.skip_optional_categories`) to conftest.py (`pytest_collection_modifyitems`)
    - Remove unused `scores` fixture in conftest.py

commit 370d6dbf5df75d78e3878877968e8cd309d6d7fb
Author: Reinier van der Leer <pwuts@agpt.co>
Date:   Tue Jan 9 15:16:43 2024 +0100

    refactor(benchmark): Simplify models in report_types.py

    - Removed ForbidOptionalMeta and BaseModelBenchmark classes.
    - Changed model attributes to optional: `Metrics.difficulty`, `Metrics.success`, `Metrics.success_percentage`, `Metrics.run_time`, and `Test.reached_cutoff`.
    - Added validator to `Metrics` model to require `success` and `run_time` fields if `attempted=True`.
    - Added default values to all optional model fields.
    - Removed duplicate imports.
    - Added condition in process_report.py to prevent null lookups if `metrics.difficulty` is not set.
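
The last commit above mentions adding a validator to the `Metrics` model that requires `success` and `run_time` whenever `attempted=True`. As a rough illustration only, such a validator could look like the sketch below; it assumes pydantic v1-style validators (consistent with the `.dict()` calls in this file), and any field beyond those named in the commit message is a guess, not the repository's actual report_types.py.

# Hypothetical sketch of the Metrics validator described in the commit message above.
from typing import Optional

from pydantic import BaseModel, validator


class Metrics(BaseModel):
    difficulty: Optional[str] = None
    success: Optional[bool] = None
    success_percentage: Optional[float] = None
    run_time: Optional[str] = None
    fail_reason: Optional[str] = None
    attempted: bool = False

    @validator("attempted")
    def require_details_if_attempted(cls, v: bool, values: dict) -> bool:
        # An attempted run must carry an outcome and a duration.
        if v and (values.get("success") is None or values.get("run_time") is None):
            raise ValueError("success and run_time are required if attempted=True")
        return v
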
155 lines
5.4 KiB
Python
import json
import logging
import os
import sys
from pathlib import Path

import pytest

from agbenchmark.challenges import ChallengeInfo
from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.reports.processing.report_types import Metrics, Test
from agbenchmark.reports.ReportManager import SingletonReportManager
from agbenchmark.utils.data_types import DifficultyLevel
from agbenchmark.utils.utils import calculate_success_percentage

# from agbenchmark.utils.get_data_from_helicone import get_data_from_helicone

logger = logging.getLogger(__name__)


def get_and_update_success_history(test_name: str, info_details: Test) -> list[bool]:
    """Update the test's success history and success rate with the latest result."""
    mock = os.getenv("IS_MOCK")  # Check if --mock is in sys.argv

    prev_test_results = SingletonReportManager().SUCCESS_RATE_TRACKER.tests.get(
        test_name, []
    )

    if not mock and info_details.metrics.success is not None:
        # only add if it's an actual test
        prev_test_results.append(info_details.metrics.success)
        SingletonReportManager().SUCCESS_RATE_TRACKER.update(
            test_name, prev_test_results
        )

    # can calculate success rate regardless of mock
    info_details.metrics.success_percentage = calculate_success_percentage(
        prev_test_results
    )

    return prev_test_results


def update_regression_tests(
    prev_test_results: list[bool],
    info_details: Test,
    test_name: str,
) -> None:
    if len(prev_test_results) >= 3 and prev_test_results[-3:] == [True, True, True]:
        # if the last 3 tests were successful, add to the regression tests
        info_details.is_regression = True
        SingletonReportManager().REGRESSION_MANAGER.add_test(
            test_name, info_details.dict(include={"difficulty", "data_path"})
        )


def initialize_test_report(
    item: pytest.Item,
    challenge_info: ChallengeInfo,
):
    """Get or create the `Test` report for a challenge and attach it to `item`."""
    difficulty = challenge_info.difficulty
    if isinstance(difficulty, DifficultyLevel):
        difficulty = difficulty.value

    # Extract the challenge_location from the class
    # challenge_location: str = getattr(item.cls, "CHALLENGE_LOCATION", "")
    # test_name = item.nodeid.split("::")[1]
    # item.test_name = test_name

    test_info = dict(item.user_properties).get("info_details") or Test(
        data_path=challenge_info.source_uri,
        is_regression=False,
        category=[c.value for c in challenge_info.category],
        task=challenge_info.task,
        answer=challenge_info.reference_answer or "",
        description=challenge_info.description or "",
        metrics=Metrics(
            difficulty=difficulty,
            attempted=False,
        ),
    )

    # user facing reporting
    if item:
        item.user_properties.append(("info_details", test_info))

    return test_info


def finalize_test_report(
    item: pytest.Item, call: pytest.CallInfo, config: AgentBenchmarkConfig
) -> None:
    """Fill in the test's metrics from the pytest call outcome and save the report."""
    user_properties: dict = dict(item.user_properties)

    info_details: Test = user_properties.get("info_details", {})
    test_name: str = user_properties.get("test_name", "")

    mock = os.getenv("IS_MOCK")  # Check if --mock is in sys.argv

    logger.debug(f"Finalizing report with CallInfo: {vars(call)}")
    info_details.metrics.attempted = True
    if call.excinfo is None:
        info_details.metrics.success = True
    else:
        if not mock:  # don't remove if it's a mock test
            SingletonReportManager().REGRESSION_MANAGER.remove_test(test_name)
        info_details.metrics.fail_reason = str(call.excinfo.value)
        if call.excinfo.typename == "Skipped":
            # a skipped challenge does not count as attempted
            info_details.metrics.attempted = False
    info_details.metrics.run_time = f"{round(call.duration, 3)} seconds"
    info_details.reached_cutoff = user_properties.get("timed_out", False)

    prev_test_results: list[bool] = get_and_update_success_history(
        test_name, info_details
    )

    update_regression_tests(prev_test_results, info_details, test_name)

    if info_details and test_name:
        # if "--mock" not in sys.argv and os.environ.get("HELICONE_API_KEY"):
        #     logger.debug("Getting cost from Helicone")
        #     info_details.metrics.cost = get_data_from_helicone(test_name)
        #     logger.debug(f"Cost: {cost}")

        if "--mock" not in sys.argv:
            update_challenges_already_beaten(
                config.challenges_already_beaten_file, info_details, test_name
            )

        SingletonReportManager().INFO_MANAGER.add_test_report(test_name, info_details)


def update_challenges_already_beaten(
    challenges_already_beaten_file: Path, info_details: Test, test_name: str
) -> None:
    """Record whether this challenge has ever been beaten in the given file."""
    current_run_successful = info_details.metrics.success
    try:
        with open(challenges_already_beaten_file, "r") as f:
            challenge_data = json.load(f)
    except FileNotFoundError:
        challenge_data = {}
    challenge_beaten_in_the_past = challenge_data.get(test_name)

    # a challenge counts as beaten if this run or any previous run succeeded
    challenge_data[test_name] = True
    if not challenge_beaten_in_the_past and not current_run_successful:
        challenge_data[test_name] = False

    with open(challenges_already_beaten_file, "w") as f:
        json.dump(challenge_data, f, indent=4)


def session_finish(agbenchmark_config: AgentBenchmarkConfig) -> None:
    SingletonReportManager().INFO_MANAGER.finalize_session_report(agbenchmark_config)
    SingletonReportManager().REGRESSION_MANAGER.save()
    SingletonReportManager().SUCCESS_RATE_TRACKER.save()
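
As context for how this module is used: the third commit in the log at the top of this page says `finalize_test_report` is now called from the `pytest_runtest_makereport` hook in conftest.py, immediately after a challenge finishes running. The sketch below illustrates how that wiring could look; the hook bodies, the module path `agbenchmark.reports.reports`, and the way the `AgentBenchmarkConfig` instance is obtained are assumptions rather than the repository's actual conftest.py.

# Hypothetical conftest.py excerpt (illustration only; the real hooks may differ)
import pytest

from agbenchmark.config import AgentBenchmarkConfig
from agbenchmark.reports.reports import finalize_test_report, session_finish

# assumed: the real code presumably loads the config from disk instead of default-constructing it
agbenchmark_config = AgentBenchmarkConfig()


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item, call: pytest.CallInfo):
    yield  # let pytest build its own report object first
    if call.when == "call":
        # finalize immediately after the challenge runs, instead of after teardown
        finalize_test_report(item, call, agbenchmark_config)


def pytest_sessionfinish(session: pytest.Session) -> None:
    # flush the session report, regression list, and success-rate history to disk
    session_finish(agbenchmark_config)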