feat(agent): Add Sentry integration for telemetry

* Add Sentry integration for telemetry
   - Add `sentry_sdk` dependency
   - Add setup logic and config flow using `TELEMETRY_OPT_IN` environment variable
      - Add app/telemetry.py with `setup_telemetry` helper routine
      - Call `setup_telemetry` in `cli()` in app/cli.py
      - Add `TELEMETRY_OPT_IN` to .env.template
      - Add helper function `env_file_exists` and routine `set_env_config_value` to app/utils.py
         - Add unit tests for `set_env_config_value` in test_utils.py
      - Add prompt to startup to ask whether the user wants to enable telemetry if the env variable isn't set

* Add `capture_exception` statements for LLM parsing errors and command failures
This commit is contained in:
Reinier van der Leer
2024-02-13 18:10:52 +01:00
parent 3b8d63dfb6
commit 393d6b97e6
9 changed files with 250 additions and 4 deletions

View File

@@ -5,6 +5,10 @@
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
OPENAI_API_KEY=your-openai-api-key
## TELEMETRY_OPT_IN - Share telemetry on errors and other issues with the AutoGPT team, e.g. through Sentry.
## This helps us to spot and solve problems earlier & faster. (Default: DISABLED)
# TELEMETRY_OPT_IN=true
## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
# EXECUTE_LOCAL_COMMANDS=False

View File

@@ -10,6 +10,7 @@ if TYPE_CHECKING:
from autogpt.config import Config
from autogpt.models.command_registry import CommandRegistry
import sentry_sdk
from pydantic import Field
from autogpt.core.configuration import Configurable
@@ -256,6 +257,7 @@ class Agent(
logger.warning(
f"{command_name}({fmt_kwargs(command_args)}) raised an error: {e}"
)
sentry_sdk.capture_exception(e)
result_tlength = self.llm_provider.count_tokens(str(result), self.llm.name)
if result_tlength > self.send_token_limit // 3:

View File

@@ -7,10 +7,14 @@ import click
from autogpt.logs.config import LogFormatName
from .telemetry import setup_telemetry
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx: click.Context):
    """Root CLI group; sets up telemetry, then falls through to `run` when no
    subcommand is given."""
    setup_telemetry()

    # Invoke `run` by default
    if ctx.invoked_subcommand is None:
        ctx.invoke(run)

View File

@@ -0,0 +1,40 @@
import os
import click
from .utils import env_file_exists, set_env_config_value
def setup_telemetry() -> None:
    """Enable Sentry-based telemetry if the user has opted in.

    When `TELEMETRY_OPT_IN` is unset and a .env file exists, the user is
    prompted once and the answer is persisted to .env.
    """
    if os.getenv("TELEMETRY_OPT_IN") is None:
        # Without a .env file there is nowhere to persist the answer, so skip
        # the prompt rather than re-asking in non-persistent environments.
        if not env_file_exists():
            return
        _prompt_telemetry_opt_in()

    if os.getenv("TELEMETRY_OPT_IN", "").lower() == "true":
        _setup_sentry()


def _prompt_telemetry_opt_in() -> None:
    """Ask the user about telemetry and store the choice in .env."""
    print()
    allow_telemetry = click.prompt(
        "❓ Do you want to enable telemetry? ❓\n"
        "This means AutoGPT will send diagnostic data to the core development team "
        "when something goes wrong, and will help us to diagnose and fix problems "
        "earlier and faster.\n"
        "Please enter 'yes' or 'no'",
        type=bool,
    )
    set_env_config_value("TELEMETRY_OPT_IN", "true" if allow_telemetry else "false")
    print(
        "💡 If you ever change your mind, you can adjust 'TELEMETRY_OPT_IN' in .env"
    )
    print()
def _setup_sentry() -> None:
    """Initialize the Sentry SDK for error reporting."""
    import sentry_sdk

    environment = os.getenv("TELEMETRY_ENVIRONMENT")
    sentry_sdk.init(
        dsn="https://dc266f2f7a2381194d1c0fa36dff67d8@o4505260022104064.ingest.sentry.io/4506739844710400",  # noqa
        environment=environment,
    )

View File

@@ -2,6 +2,8 @@ import logging
import os
import re
import sys
from pathlib import Path
from typing import TYPE_CHECKING
import requests
from colorama import Fore, Style
@@ -9,13 +11,14 @@ from git import InvalidGitRepositoryError, Repo
from prompt_toolkit import ANSI, PromptSession
from prompt_toolkit.history import InMemoryHistory
from autogpt.config import Config
if TYPE_CHECKING:
from autogpt.config import Config
logger = logging.getLogger(__name__)
session = PromptSession(history=InMemoryHistory())
async def clean_input(config: Config, prompt: str = ""):
async def clean_input(config: "Config", prompt: str = ""):
try:
if config.chat_messages_enabled:
for plugin in config.plugins:
@@ -149,7 +152,7 @@ By using the System, you agree to indemnify, defend, and hold harmless the Proje
return legal_text
def print_motd(config: Config, logger: logging.Logger):
def print_motd(config: "Config", logger: logging.Logger):
motd, is_new_motd = get_latest_bulletin()
if motd:
motd = markdown_to_ansi_style(motd)
@@ -188,3 +191,31 @@ def print_python_version_info(logger: logging.Logger):
"parts of AutoGPT with this version. "
"Please consider upgrading to Python 3.10 or higher.",
)
ENV_FILE_PATH = Path(__file__).parent.parent.parent / ".env"


def env_file_exists() -> bool:
    """Return True if the repository's .env file is present on disk."""
    return os.path.isfile(ENV_FILE_PATH)
def set_env_config_value(key: str, value: str) -> None:
    """Set an environment variable and persist the new value to the .env file.

    The variable is set in `os.environ` for the current process. In the .env
    file, a matching `KEY=...` line (or a disabled `# KEY=...` line, which is
    re-enabled) is rewritten in place; if no such line exists, `KEY=value` is
    appended at the end.

    Args:
        key: name of the environment variable to set
        value: value to assign to it

    Raises:
        FileNotFoundError: if no .env file exists at ENV_FILE_PATH
    """
    os.environ[key] = value

    # Escape `key` so regex metacharacters in a variable name cannot match
    # unintended lines in the file.
    key_line_pattern = re.compile(rf"^(?:# )?{re.escape(key)}=.*$")

    with ENV_FILE_PATH.open("r+") as file:
        lines = file.readlines()
        file.seek(0)
        key_already_in_file = False
        for line in lines:
            if key_line_pattern.match(line):
                file.write(f"{key}={value}\n")
                key_already_in_file = True
            else:
                file.write(line)

        if not key_already_in_file:
            file.write(f"{key}={value}\n")

        # Rewritten content may be shorter than the original; drop leftovers.
        file.truncate()

View File

@@ -5,6 +5,7 @@ import os
from pathlib import Path
from typing import Callable, Coroutine, Iterator, Optional, ParamSpec, TypeVar
import sentry_sdk
import tenacity
import tiktoken
import yaml
@@ -460,6 +461,10 @@ class OpenAIProvider(
except Exception as e:
self._logger.warning(f"Parsing attempt #{attempts} failed: {e}")
self._logger.debug(f"Parsing failed on response: '''{assistant_msg}'''")
sentry_sdk.capture_exception(
error=e,
extras={"assistant_msg": assistant_msg, "i_attempt": attempts},
)
if attempts < self._configuration.fix_failed_parse_tries:
model_prompt.append(
ChatMessage.system(f"ERROR PARSING YOUR RESPONSE:\n\n{e}")

View File

@@ -5704,6 +5704,51 @@ trio = ">=0.17,<1.0"
trio-websocket = ">=0.9,<1.0"
urllib3 = {version = ">=1.26,<3", extras = ["socks"]}
[[package]]
name = "sentry-sdk"
version = "1.40.4"
description = "Python client for Sentry (https://sentry.io)"
optional = false
python-versions = "*"
files = [
{file = "sentry-sdk-1.40.4.tar.gz", hash = "sha256:657abae98b0050a0316f0873d7149f951574ae6212f71d2e3a1c4c88f62d6456"},
{file = "sentry_sdk-1.40.4-py2.py3-none-any.whl", hash = "sha256:ac5cf56bb897ec47135d239ddeedf7c1c12d406fb031a4c0caa07399ed014d7e"},
]
[package.dependencies]
certifi = "*"
urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
[package.extras]
aiohttp = ["aiohttp (>=3.5)"]
arq = ["arq (>=0.23)"]
asyncpg = ["asyncpg (>=0.23)"]
beam = ["apache-beam (>=2.12)"]
bottle = ["bottle (>=0.12.13)"]
celery = ["celery (>=3)"]
chalice = ["chalice (>=1.16.0)"]
clickhouse-driver = ["clickhouse-driver (>=0.2.0)"]
django = ["django (>=1.8)"]
falcon = ["falcon (>=1.4)"]
fastapi = ["fastapi (>=0.79.0)"]
flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
grpcio = ["grpcio (>=1.21.1)"]
httpx = ["httpx (>=0.16.0)"]
huey = ["huey (>=2)"]
loguru = ["loguru (>=0.5)"]
opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"]
pure-eval = ["asttokens", "executing", "pure_eval"]
pymongo = ["pymongo (>=3.1)"]
pyspark = ["pyspark (>=2.4.4)"]
quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
rq = ["rq (>=0.6)"]
sanic = ["sanic (>=0.8)"]
sqlalchemy = ["sqlalchemy (>=1.2)"]
starlette = ["starlette (>=0.19.1)"]
starlite = ["starlite (>=1.48)"]
tornado = ["tornado (>=5)"]
[[package]]
name = "setuptools"
version = "69.0.3"
@@ -7191,4 +7236,4 @@ benchmark = ["agbenchmark"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
content-hash = "a80f52e61947dc4f87cabb5a5221e6bee40d27b4228a399da5211aae056bc7bc"
content-hash = "3a600c147d7ccb149a2230a3ad2efe50664dfecaf5c311669b220d8aa5450e53"

View File

@@ -59,6 +59,7 @@ readability-lxml = "^0.8.1"
redis = "*"
requests = "*"
selenium = "^4.11.2"
sentry-sdk = "^1.40.4"
spacy = "^3.0.0"
tenacity = "^8.2.2"
tiktoken = "^0.5.0"

View File

@@ -1,15 +1,18 @@
import json
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import requests
from git import InvalidGitRepositoryError
import autogpt.app.utils
from autogpt.app.utils import (
get_bulletin_from_web,
get_current_git_branch,
get_latest_bulletin,
set_env_config_value,
)
from autogpt.json_utils.utilities import extract_dict_from_response
from autogpt.utils import validate_yaml_file
@@ -224,3 +227,114 @@ def test_extract_json_from_response_json_contained_in_string(valid_json_response
assert (
extract_dict_from_response(emulated_response_from_openai) == valid_json_response
)
@pytest.fixture
def mock_env_file_path(tmp_path):
    """Location of the mock .env file, inside pytest's per-test tmp dir."""
    return tmp_path.joinpath(".env")
env_file_initial_content = """
# This is a comment
EXISTING_KEY=EXISTING_VALUE
## This is also a comment
# DISABLED_KEY=DISABLED_VALUE
# Another comment
UNUSED_KEY=UNUSED_VALUE
"""
@pytest.fixture
def mock_env_file(mock_env_file_path: Path, monkeypatch: pytest.MonkeyPatch):
    """Point the app's utils at a freshly written mock .env file."""
    monkeypatch.setattr(autogpt.app.utils, "ENV_FILE_PATH", mock_env_file_path)
    mock_env_file_path.write_text(env_file_initial_content)
    return mock_env_file_path
@pytest.fixture
def mock_environ(monkeypatch: pytest.MonkeyPatch):
    """Swap os.environ for an empty dict so tests never touch the real env."""
    fake_environ: dict = {}
    monkeypatch.setattr(os, "environ", fake_environ)
    return fake_environ
def test_set_env_config_value_updates_existing_key(
    mock_env_file: Path, mock_environ: dict
):
    """An already-enabled key gets its value replaced in place."""
    # Sanity-check that the fixture wrote the expected starting content
    assert mock_env_file.read_text() == env_file_initial_content

    set_env_config_value("EXISTING_KEY", "NEW_VALUE")

    # Only the EXISTING_KEY line may differ from the original content
    expected_content = env_file_initial_content.replace(
        "EXISTING_KEY=EXISTING_VALUE", "EXISTING_KEY=NEW_VALUE"
    )
    assert mock_env_file.read_text() == expected_content
    assert mock_environ["EXISTING_KEY"] == "NEW_VALUE"
def test_set_env_config_value_uncomments_and_updates_disabled_key(
    mock_env_file: Path, mock_environ: dict
):
    """A commented-out `# KEY=...` entry is re-enabled with the new value."""
    # Sanity-check that the fixture wrote the expected starting content
    assert mock_env_file.read_text() == env_file_initial_content

    set_env_config_value("DISABLED_KEY", "ENABLED_NEW_VALUE")

    # Only the disabled line may differ: uncommented and given the new value
    expected_content = env_file_initial_content.replace(
        "# DISABLED_KEY=DISABLED_VALUE", "DISABLED_KEY=ENABLED_NEW_VALUE"
    )
    assert mock_env_file.read_text() == expected_content
    assert mock_environ["DISABLED_KEY"] == "ENABLED_NEW_VALUE"
def test_set_env_config_value_adds_new_key(mock_env_file: Path, mock_environ: dict):
    """A key absent from .env is appended at the end of the file."""
    # Sanity-check that the fixture wrote the expected starting content
    assert mock_env_file.read_text() == env_file_initial_content

    set_env_config_value("NEW_KEY", "NEW_VALUE")

    # Original content is untouched; the new pair goes at the end
    expected_content = env_file_initial_content + "NEW_KEY=NEW_VALUE\n"
    assert mock_env_file.read_text() == expected_content
    assert mock_environ["NEW_KEY"] == "NEW_VALUE"