Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-17 05:54:26 +01:00)
- feat(agent/core): Add `AnthropicProvider`
  - Add `ANTHROPIC_API_KEY` to .env.template and docs

  Notable differences in logic compared to `OpenAIProvider` (see the sketch after this changelog entry):
  - Merges subsequent user messages in `AnthropicProvider._get_chat_completion_args`
  - Merges and extracts all system messages into the `system` parameter in `AnthropicProvider._get_chat_completion_args`
  - Supports prefill; merges prefill content (if any) into the generated response

- Prompt changes to improve compatibility with `AnthropicProvider`

  Anthropic has a slightly different API compared to OpenAI, and much stricter input validation. For example, Anthropic only supports a single `system` prompt, where OpenAI allows multiple `system` messages. Anthropic also forbids sequences of multiple `user` or `assistant` messages and requires that messages alternate between roles.
  - Move response format instruction from separate message into main system prompt
  - Fix clock message format
  - Add pre-fill to `OneShot` generated prompt
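A minimal, hedged sketch of the message handling described above. This is not the actual `AnthropicProvider` code; the helper name `prepare_anthropic_messages` and the plain-dict message format are made up for the example, but it shows the same three steps: collect system messages into a single `system` string, merge consecutive same-role messages, and append the prefill as the start of the assistant turn.

```python
def prepare_anthropic_messages(
    messages: list[dict[str, str]], prefill: str = ""
) -> tuple[str, list[dict[str, str]]]:
    """Split out system messages and merge consecutive same-role messages."""
    system_parts: list[str] = []
    merged: list[dict[str, str]] = []

    for message in messages:
        if message["role"] == "system":
            # Anthropic accepts only a single system prompt, passed separately.
            system_parts.append(message["content"])
        elif merged and merged[-1]["role"] == message["role"]:
            # Anthropic forbids consecutive messages with the same role.
            merged[-1]["content"] += "\n\n" + message["content"]
        else:
            merged.append(dict(message))

    if prefill:
        # Prefill: start the assistant turn; the model continues from here, and
        # the caller later re-attaches this prefix to the generated response.
        merged.append({"role": "assistant", "content": prefill})

    return "\n\n".join(system_parts), merged


if __name__ == "__main__":
    system, msgs = prepare_anthropic_messages(
        [
            {"role": "system", "content": "You are AutoGPT."},
            {"role": "system", "content": "Respond in JSON."},
            {"role": "user", "content": "Determine the next command."},
            {"role": "user", "content": "The current time is 2024-04-01 12:00."},
        ],
        prefill='{"thoughts": {',
    )
    print(system)  # single merged system prompt
    print(msgs)    # alternating roles, ending with the assistant prefill
```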
- refactor(agent/core): Tweak `model_providers.schema`
  - Simplify `ModelProviderUsage`
    - Remove attribute `total_tokens`, as it is always equal to `prompt_tokens + completion_tokens`
    - Modify signature of `update_usage(..)`; it no longer requires a full `ModelResponse` object as input
  - Improve `ModelProviderBudget` (see the sketch after this entry)
    - Change type of attribute `usage` to `defaultdict[str, ModelProviderUsage]` to allow per-model usage tracking
    - Modify signature of `update_usage_and_cost(..)`; it no longer requires a full `ModelResponse` object as input
    - Allow zero-argument instantiation of `ModelProviderBudget`
  - Fix type of `AssistantChatMessage.role` to match `ChatMessage.role` (`str` -> `ChatMessage.Role`)
  - Add shared attributes and constructor to the `ModelProvider` base class
  - Add `max_output_tokens` parameter to the `create_chat_completion` interface
  - Add pre-filling as a global feature
    - Add `prefill_response` field to the `ChatPrompt` model
    - Add `prefill_response` parameter to the `create_chat_completion` interface
  - Add `ChatModelProvider.get_available_models()` and remove `ApiManager`
  - Remove unused `OpenAIChatParser` typedef in openai.py
  - Remove redundant `budget` attribute definition on `OpenAISettings`
  - Remove unnecessary `usage` in `OpenAIProvider` > `default_settings` > `budget`
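A sketch of the reworked usage/budget tracking described in this entry, written with plain dataclasses rather than the pydantic models the project actually uses. The attribute and method names follow the changelog, but the exact signatures in `model_providers.schema` may differ (for instance, the real `update_usage_and_cost(..)` may compute the cost internally instead of taking it as an argument).

```python
from collections import defaultdict
from dataclasses import dataclass, field


@dataclass
class ModelProviderUsage:
    # `total_tokens` is gone: it is always prompt_tokens + completion_tokens.
    prompt_tokens: int = 0
    completion_tokens: int = 0

    def update_usage(self, input_tokens_used: int, output_tokens_used: int) -> None:
        self.prompt_tokens += input_tokens_used
        self.completion_tokens += output_tokens_used


@dataclass
class ModelProviderBudget:
    # Zero-argument instantiation works because every field has a default.
    total_budget: float = float("inf")
    total_cost: float = 0.0
    remaining_budget: float = float("inf")
    # Per-model usage tracking, keyed by model name.
    usage: defaultdict[str, ModelProviderUsage] = field(
        default_factory=lambda: defaultdict(ModelProviderUsage)
    )

    def update_usage_and_cost(
        self,
        model_name: str,
        input_tokens_used: int,
        output_tokens_used: int,
        cost: float,
    ) -> float:
        """Record token usage and spend for one completion; return the incurred cost."""
        self.usage[model_name].update_usage(input_tokens_used, output_tokens_used)
        self.total_cost += cost
        self.remaining_budget -= cost
        return cost


if __name__ == "__main__":
    budget = ModelProviderBudget(total_budget=10.0, remaining_budget=10.0)
    budget.update_usage_and_cost(
        "claude-3-opus-20240229", input_tokens_used=1200, output_tokens_used=300, cost=0.05
    )
    print(budget.usage["claude-3-opus-20240229"], budget.remaining_budget)
```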
- feat(agent): Allow use of any available LLM provider through `MultiProvider`
  - Add `MultiProvider` (`model_providers.multi`); see the routing sketch below
  - Replace all references to / uses of `OpenAIProvider` with `MultiProvider`
  - Change type of `Config.smart_llm` and `Config.fast_llm` from `str` to `ModelName`
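A hedged sketch of the routing idea: pick the concrete provider based on the requested model name and forward the call unchanged. The class name `MultiProviderSketch`, the prefix-based routing, and the constructor signature are illustrative assumptions; the real `MultiProvider` in `model_providers.multi` may resolve models differently.

```python
from typing import Any


class MultiProviderSketch:
    """Illustrative stand-in for `MultiProvider`: route requests by model name."""

    def __init__(self, providers: dict[str, Any]):
        # e.g. {"openai": OpenAIProvider(...), "anthropic": AnthropicProvider(...)}
        self._providers = providers

    def _provider_for(self, model_name: str) -> Any:
        if model_name.startswith(("gpt-3.5", "gpt-4")):
            return self._providers["openai"]
        if model_name.startswith("claude"):
            return self._providers["anthropic"]
        raise ValueError(f"No provider available for model '{model_name}'")

    async def create_chat_completion(self, model_prompt, model_name: str, **kwargs):
        # The agent keeps calling one object; which concrete provider serves
        # `model_name` (e.g. Config.smart_llm or Config.fast_llm) is resolved here.
        provider = self._provider_for(model_name)
        return await provider.create_chat_completion(
            model_prompt, model_name, **kwargs
        )
```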
- feat(agent/core): Validate function call arguments in `create_chat_completion`
  - Add `validate_call` method to `CompletionModelFunction` in `model_providers.schema`
  - Add `validate_tool_calls` utility function in `model_providers.utils`
  - Add tool call validation step to `create_chat_completion` in `OpenAIProvider` and `AnthropicProvider` (see the sketch below)
  - Remove (now redundant) command argument validation logic in agent.py and models/command.py
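A minimal sketch of what such a validation step can look like, using the `jsonschema` package that is already a project dependency. The names mirror the changelog (`validate_call`, `validate_tool_calls`), but the shape of `CompletionModelFunction` and the real function signatures are assumptions.

```python
from dataclasses import dataclass

from jsonschema import Draft7Validator


@dataclass
class CompletionModelFunctionSketch:
    """Stripped-down stand-in for `CompletionModelFunction`."""

    name: str
    parameters_schema: dict  # JSON Schema describing the function's arguments

    def validate_call(self, arguments: dict) -> tuple[bool, list[str]]:
        """Return (is_valid, error_messages) for a proposed set of arguments."""
        errors = [
            e.message
            for e in Draft7Validator(self.parameters_schema).iter_errors(arguments)
        ]
        return (not errors, errors)


def validate_tool_calls(
    tool_calls: list[dict], functions: dict[str, CompletionModelFunctionSketch]
) -> list[str]:
    """Collect human-readable errors for every invalid tool call."""
    problems: list[str] = []
    for call in tool_calls:
        function = functions.get(call["name"])
        if function is None:
            problems.append(f"Unknown function '{call['name']}'")
            continue
        ok, errors = function.validate_call(call.get("arguments", {}))
        if not ok:
            problems.extend(f"{call['name']}: {msg}" for msg in errors)
    return problems


if __name__ == "__main__":
    web_search = CompletionModelFunctionSketch(
        name="web_search",
        parameters_schema={
            "type": "object",
            "properties": {"query": {"type": "string"}},
            "required": ["query"],
        },
    )
    calls = [{"name": "web_search", "arguments": {"query": 42}}]
    print(validate_tool_calls(calls, {"web_search": web_search}))
```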
- refactor(agent): Rename `get_openai_command_specs` to `function_specs_from_commands`
pyproject.toml (164 lines, 3.5 KiB, TOML)
[tool.poetry]
name = "agpt"
version = "0.5.0"
authors = [
    "Significant Gravitas <support@agpt.co>",
]
readme = "README.md"
description = "An open-source attempt to make GPT-4 autonomous"
homepage = "https://github.com/Significant-Gravitas/AutoGPT/tree/master/autogpts/autogpt"
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]
packages = [{ include = "autogpt" }]

[tool.poetry.scripts]
autogpt = "autogpt.app.cli:cli"
serve = "autogpt.app.cli:serve"

[tool.poetry.dependencies]
python = "^3.10"
anthropic = "^0.25.1"
# autogpt-forge = { path = "../forge" }
autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "autogpts/forge"}
beautifulsoup4 = "^4.12.2"
boto3 = "^1.33.6"
charset-normalizer = "^3.1.0"
click = "*"
colorama = "^0.4.6"
demjson3 = "^3.0.0"
distro = "^1.8.0"
docker = "*"
duckduckgo-search = "^5.0.0"
en-core-web-sm = {url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl"}
fastapi = "^0.109.1"
ftfy = "^6.1.1"
gitpython = "^3.1.32"
google-api-python-client = "*"
gTTS = "^2.3.1"
hypercorn = "^0.14.4"
inflection = "*"
jsonschema = "*"
numpy = "*"
openai = "^1.7.2"
orjson = "^3.8.10"
Pillow = "*"
pinecone-client = "^2.2.1"
playsound = "~1.2.2"
pydantic = "*"
pylatexenc = "*"
pypdf = "^3.1.0"
python-docx = "*"
python-dotenv = "^1.0.0"
pyyaml = "^6.0"
readability-lxml = "^0.8.1"
redis = "*"
requests = "*"
selenium = "^4.11.2"
sentry-sdk = "^1.40.4"
spacy = "^3.0.0"
tenacity = "^8.2.2"
tiktoken = "^0.5.0"
webdriver-manager = "*"

# OpenAI and Generic plugins import
openapi-python-client = "^0.14.0"

# Benchmarking
agbenchmark = { path = "../../benchmark", optional = true }
# agbenchmark = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "benchmark", optional = true}
google-cloud-logging = "^3.8.0"
google-cloud-storage = "^2.13.0"
psycopg2-binary = "^2.9.9"

[tool.poetry.extras]
benchmark = ["agbenchmark"]

[tool.poetry.group.dev.dependencies]
black = "*"
boto3-stubs = {extras = ["s3"], version = "^1.33.6"}
flake8 = "*"
gitpython = "^3.1.32"
isort = "*"
mypy = "*"
pre-commit = "*"
types-beautifulsoup4 = "*"
types-colorama = "*"
types-Markdown = "*"
types-Pillow = "*"

# Testing
asynctest = "*"
coverage = "*"
pytest = "*"
pytest-asyncio = "*"
pytest-benchmark = "*"
pytest-cov = "*"
pytest-integration = "*"
pytest-mock = "*"
pytest-recording = "*"
pytest-xdist = "*"
vcrpy = {git = "https://github.com/Significant-Gravitas/vcrpy.git", rev = "master"}

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.black]
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
packages = ["autogpt"]
extend-exclude = '.+/(dist|.venv|venv|build|data)/.+'

[tool.isort]
profile = "black"
multi_line_output = 3
include_trailing_comma = true
force_grid_wrap = 0
use_parentheses = true
ensure_newline_before_comments = true
line_length = 88
sections = [
    "FUTURE",
    "STDLIB",
    "THIRDPARTY",
    "FIRSTPARTY",
    "LOCALFOLDER"
]
extend_skip = [
    "agbenchmark_config/temp_folder/",
    "data/",
]

[tool.mypy]
follow_imports = 'skip'
check_untyped_defs = true
disallow_untyped_calls = true
files = [
    'autogpt/**/*.py',
    'tests/**/*.py'
]

[[tool.mypy.overrides]]
module = [
    'requests.*',
    'yaml.*'
]
ignore_missing_imports = true

[tool.pytest.ini_options]
markers = [
    "requires_openai_api_key",
    "requires_huggingface_api_key"
]