diff --git a/.github/workflows/autogpt-ci.yml b/.github/workflows/autogpt-ci.yml
index 056431c0..7ceb4785 100644
--- a/.github/workflows/autogpt-ci.yml
+++ b/.github/workflows/autogpt-ci.yml
@@ -6,19 +6,16 @@ on:
     paths:
       - 'autogpts/autogpt/**'
       - '!autogpts/autogpt/tests/vcr_cassettes'
-      - '!autogpts/autogpt/tests/challenges/current_score.json'
   pull_request:
     branches: [ stable, master, release-* ]
     paths:
       - 'autogpts/autogpt/**'
       - '!autogpts/autogpt/tests/vcr_cassettes'
-      - '!autogpts/autogpt/tests/challenges/current_score.json'
   pull_request_target:
     branches: [ master, release-*, ci-test* ]
     paths:
       - 'autogpts/autogpt/**'
       - '!autogpts/autogpt/tests/vcr_cassettes'
-      - '!autogpts/autogpt/tests/challenges/current_score.json'
 
 concurrency:
   group: ${{ format('autogpt-ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
@@ -169,8 +166,7 @@ jobs:
         poetry run pytest -vv \
           --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
           --numprocesses=logical --durations=10 \
-          tests/unit tests/integration tests/challenges
-        poetry run python tests/challenges/utils/build_current_score.py
+          tests/unit tests/integration
       env:
         CI: true
         PROXY: ${{ github.event_name == 'pull_request_target' && secrets.PROXY || '' }}
@@ -199,19 +195,6 @@ jobs:
           echo "config_key=$config_key" >> $GITHUB_OUTPUT
 
-      - name: Push updated challenge scores
-        if: github.event_name == 'push'
-        run: |
-          score_file="tests/challenges/current_score.json"
-
-          if ! git diff --quiet $score_file; then
-            git add $score_file
-            git commit -m "Update challenge scores"
-            git push origin HEAD:${{ github.ref_name }}
-          else
-            echo "The challenge scores didn't change."
-          fi
-
       - id: push_cassettes
         name: Push updated cassettes
         # For pull requests, push updated cassettes even when tests fail
diff --git a/.github/workflows/autogpt-docker-ci.yml b/.github/workflows/autogpt-docker-ci.yml
index 408a7dba..282e05c0 100644
--- a/.github/workflows/autogpt-docker-ci.yml
+++ b/.github/workflows/autogpt-docker-ci.yml
@@ -6,13 +6,11 @@ on:
     paths:
       - 'autogpts/autogpt/**'
       - '!autogpts/autogpt/tests/vcr_cassettes'
-      - '!autogpts/autogpt/tests/challenges/current_score.json'
   pull_request:
     branches: [ master, release-*, stable ]
     paths:
       - 'autogpts/autogpt/**'
       - '!autogpts/autogpt/tests/vcr_cassettes'
-      - '!autogpts/autogpt/tests/challenges/current_score.json'
 
 concurrency:
   group: ${{ format('autogpt-docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
diff --git a/.github/workflows/pr-label.yml b/.github/workflows/pr-label.yml
index fb50e7c1..077211f4 100644
--- a/.github/workflows/pr-label.yml
+++ b/.github/workflows/pr-label.yml
@@ -6,7 +6,6 @@ on:
     branches: [ master, release-* ]
     paths-ignore:
       - 'autogpts/autogpt/tests/vcr_cassettes'
-      - 'autogpts/autogpt/tests/challenges/current_score.json'
       - 'benchmark/reports/**'
   # So that the `dirtyLabel` is removed if conflicts are resolved
   # We recommend `pull_request_target` so that github secrets are available.
diff --git a/autogpts/autogpt/.gitignore b/autogpts/autogpt/.gitignore
index c2511764..1180cd49 100644
--- a/autogpts/autogpt/.gitignore
+++ b/autogpts/autogpt/.gitignore
@@ -17,6 +17,7 @@ log-ingestion.txt
 *.mp3
 mem.sqlite3
 venvAutoGPT
+data/*
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -163,6 +164,7 @@ CURRENT_BULLETIN.md
 
 # AgBenchmark
 agbenchmark_config/reports/
+agbenchmark_config/workspace/
 
 # Nodejs
 package-lock.json
diff --git a/autogpts/autogpt/Dockerfile b/autogpts/autogpt/Dockerfile
index cd1ef6bd..7121c691 100644
--- a/autogpts/autogpt/Dockerfile
+++ b/autogpts/autogpt/Dockerfile
@@ -36,12 +36,12 @@ CMD []
 
 # dev build -> include everything
 FROM autogpt-base as autogpt-dev
-RUN poetry install --no-root --without benchmark
+RUN poetry install --no-root
 ONBUILD COPY . ./
 
 # release build -> include bare minimum
 FROM autogpt-base as autogpt-release
-RUN poetry install --no-root --without dev,benchmark
+RUN poetry install --no-root --without dev
 ONBUILD COPY autogpt/ ./autogpt
 ONBUILD COPY scripts/ ./scripts
 ONBUILD COPY plugins/ ./plugins
diff --git a/autogpts/autogpt/agbenchmark_config/benchmarks.py b/autogpts/autogpt/agbenchmark_config/benchmarks.py
index 7253a2fc..46748658 100644
--- a/autogpts/autogpt/agbenchmark_config/benchmarks.py
+++ b/autogpts/autogpt/agbenchmark_config/benchmarks.py
@@ -5,13 +5,10 @@ from pathlib import Path
 from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
 from autogpt.app.main import _configure_openai_provider, run_interaction_loop
 from autogpt.commands import COMMAND_CATEGORIES
-from autogpt.config import AIConfig, ConfigBuilder
+from autogpt.config import AIProfile, ConfigBuilder
 from autogpt.logs.config import configure_logging
-from autogpt.memory.vector import get_memory
 from autogpt.models.command_registry import CommandRegistry
-from autogpt.workspace import Workspace
 
-PROJECT_DIR = Path().resolve()
 LOG_DIR = Path(__file__).parent / "logs"
 
@@ -21,7 +18,7 @@ def run_specific_agent(task: str, continuous_mode: bool = False) -> None:
 
 
 def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
-    config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR)
+    config = ConfigBuilder.build_config_from_env()
     config.debug_mode = False
     config.continuous_mode = continuous_mode
     config.continuous_limit = 20
@@ -29,14 +26,16 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
     config.noninteractive_mode = True
     config.plain_output = True
     config.memory_backend = "no_memory"
-    config.workspace_path = Workspace.init_workspace_directory(config)
-    config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path)
 
-    configure_logging(config, LOG_DIR)
+    configure_logging(
+        debug_mode=config.debug_mode,
+        plain_output=config.plain_output,
+        log_dir=LOG_DIR,
+    )
 
     command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config)
 
-    ai_config = AIConfig(
+    ai_profile = AIProfile(
         ai_name="AutoGPT",
         ai_role="a multi-purpose AI assistant.",
         ai_goals=[task],
@@ -47,10 +46,11 @@ def bootstrap_agent(task: str, continuous_mode: bool) -> Agent:
     agent_settings = AgentSettings(
         name=Agent.default_settings.name,
         description=Agent.default_settings.description,
-        ai_config=ai_config,
+        ai_profile=ai_profile,
         config=AgentConfiguration(
             fast_llm=config.fast_llm,
             smart_llm=config.smart_llm,
+            allow_fs_access=not config.restrict_to_workspace,
            use_functions_api=config.openai_functions,
             plugins=config.plugins,
         ),
@@ -58,13 +58,14 @@
         history=Agent.default_settings.history.copy(deep=True),
     )
 
-    return Agent(
+    agent = Agent(
         settings=agent_settings,
         llm_provider=_configure_openai_provider(config),
         command_registry=command_registry,
-        memory=get_memory(config),
         legacy_config=config,
     )
+    agent.attach_fs(config.app_data_dir / "agents" / "AutoGPT-benchmark")  # HACK
+    return agent
 
 
 if __name__ == "__main__":
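Reviewer note: since `bootstrap_agent` no longer wires up vector memory or a global workspace, a quick way to sanity-check the new wiring is a sketch like the following. It assumes it is run from the `autogpts/autogpt` directory (so the `agbenchmark_config` folder is importable) with `OPENAI_API_KEY` set; the task string is illustrative.

```python
# Minimal smoke test of the rewired benchmark entrypoint (assumptions above).
from agbenchmark_config.benchmarks import bootstrap_agent

agent = bootstrap_agent("Write 'Hello World' to a file named hello.txt", False)

# The agent's files now live in a per-agent directory instead of a global
# workspace: <app_data_dir>/agents/AutoGPT-benchmark/
print(agent.file_manager.root)       # state.json + file_logger.log live here
print(agent.workspace.root)          # <agent dir>/workspace, via the mixin
```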
diff --git a/autogpts/autogpt/agbenchmark_config/config.json b/autogpts/autogpt/agbenchmark_config/config.json
index 995574c2..154fe388 100644
--- a/autogpts/autogpt/agbenchmark_config/config.json
+++ b/autogpts/autogpt/agbenchmark_config/config.json
@@ -1 +1,8 @@
-{"workspace": {"input": "auto_gpt_workspace", "output":"auto_gpt_workspace" }, "entry_path": "agbenchmark.benchmarks"}
+{
+  "workspace": {
+    "input": "agbenchmark_config/workspace",
+    "output": "agbenchmark_config/workspace"
+  },
+  "entry_path": "agbenchmark.benchmarks",
+  "host": "http://localhost:8000"
+}
diff --git a/autogpts/autogpt/autogpt/__main__.py b/autogpts/autogpt/autogpt/__main__.py
index 3b1122a4..e5b92456 100644
--- a/autogpts/autogpt/autogpt/__main__.py
+++ b/autogpts/autogpt/autogpt/__main__.py
@@ -2,4 +2,4 @@ import autogpt.app.cli
 
 if __name__ == "__main__":
-    autogpt.app.cli.main()
+    autogpt.app.cli.cli()
diff --git a/autogpts/autogpt/autogpt/agent_factory/configurators.py b/autogpts/autogpt/autogpt/agent_factory/configurators.py
new file mode 100644
index 00000000..3af41774
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_factory/configurators.py
@@ -0,0 +1,116 @@
+from typing import Optional
+
+from autogpt.agent_manager import AgentManager
+from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
+from autogpt.commands import COMMAND_CATEGORIES
+from autogpt.config import AIDirectives, AIProfile, Config
+from autogpt.core.resource.model_providers import ChatModelProvider
+from autogpt.logs.config import configure_chat_plugins
+from autogpt.logs.helpers import print_attribute
+from autogpt.models.command_registry import CommandRegistry
+from autogpt.plugins import scan_plugins
+
+
+def create_agent(
+    task: str,
+    ai_profile: AIProfile,
+    app_config: Config,
+    llm_provider: ChatModelProvider,
+    directives: Optional[AIDirectives] = None,
+) -> Agent:
+    if not task:
+        raise ValueError("No task specified for new agent")
+    if not directives:
+        directives = AIDirectives.from_file(app_config.prompt_settings_file)
+
+    agent = _configure_agent(
+        task=task,
+        ai_profile=ai_profile,
+        directives=directives,
+        app_config=app_config,
+        llm_provider=llm_provider,
+    )
+
+    agent.state.agent_id = AgentManager.generate_id(agent.ai_profile.ai_name)
+
+    return agent
+
+
+def configure_agent_with_state(
+    state: AgentSettings,
+    app_config: Config,
+    llm_provider: ChatModelProvider,
+) -> Agent:
+    return _configure_agent(
+        state=state,
+        app_config=app_config,
+        llm_provider=llm_provider,
+    )
+
+
+def _configure_agent(
+    app_config: Config,
+    llm_provider: ChatModelProvider,
+    task: str = "",
+    ai_profile: Optional[AIProfile] = None,
+    directives: Optional[AIDirectives] = None,
+    state: Optional[AgentSettings] = None,
+) -> Agent:
+    if not (state or task and ai_profile and directives):
+        raise TypeError(
+            "Either (state) or (task, ai_profile, directives) must be specified"
+        )
+
+    app_config.plugins = scan_plugins(app_config, app_config.debug_mode)
+    configure_chat_plugins(app_config)
+
+    # Create a CommandRegistry instance and scan default folder
+    command_registry = CommandRegistry.with_command_modules(
+        modules=COMMAND_CATEGORIES,
+        config=app_config,
+    )
+
+    agent_state = state or create_agent_state(
+        task=task,
+        ai_profile=ai_profile,
+        directives=directives,
+        app_config=app_config,
+    )
+
+    # TODO: configure memory
+
+    print_attribute("Configured Browser", app_config.selenium_web_browser)
+
+    return Agent(
+        settings=agent_state,
+        llm_provider=llm_provider,
+        command_registry=command_registry,
+        legacy_config=app_config,
+    )
+
+
+def create_agent_state(
+    task: str,
+    ai_profile: AIProfile,
+    directives: AIDirectives,
+    app_config: Config,
+) -> AgentSettings:
+    agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True)
+    agent_prompt_config.use_functions_api = app_config.openai_functions
+
+    return AgentSettings(
+        name=Agent.default_settings.name,
+        description=Agent.default_settings.description,
+        task=task,
+        ai_profile=ai_profile,
+        directives=directives,
+        config=AgentConfiguration(
+            fast_llm=app_config.fast_llm,
+            smart_llm=app_config.smart_llm,
+            allow_fs_access=not app_config.restrict_to_workspace,
+            use_functions_api=app_config.openai_functions,
+            plugins=app_config.plugins,
+        ),
+        prompt_config=agent_prompt_config,
+        history=Agent.default_settings.history.copy(deep=True),
+    )
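Reviewer note: a minimal sketch of how the new factory functions fit together. The config and provider setup mirrors `benchmarks.py` above; the task string and profile values are illustrative.

```python
from autogpt.agent_factory.configurators import create_agent, configure_agent_with_state
from autogpt.app.main import _configure_openai_provider
from autogpt.config import AIProfile, ConfigBuilder

app_config = ConfigBuilder.build_config_from_env()
llm_provider = _configure_openai_provider(app_config)

agent = create_agent(
    task="Summarize the project README",  # illustrative task
    ai_profile=AIProfile(
        ai_name="SummarizerGPT",
        ai_role="an assistant that summarizes documents.",
    ),
    app_config=app_config,
    llm_provider=llm_provider,
)

# create_agent assigns an ID but no directory; attach_fs() binds one,
# after which the state can be persisted and later rehydrated:
agent.attach_fs(app_config.app_data_dir / "agents" / agent.state.agent_id)
agent.state.save_to_json_file(agent.file_manager.state_file_path)

restored = configure_agent_with_state(
    state=agent.state, app_config=app_config, llm_provider=llm_provider
)
```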
diff --git a/autogpts/autogpt/autogpt/agent_factory/generators.py b/autogpts/autogpt/autogpt/agent_factory/generators.py
new file mode 100644
index 00000000..2713d5cb
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_factory/generators.py
@@ -0,0 +1,31 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from autogpt.agents.agent import Agent
+    from autogpt.config import Config
+    from autogpt.core.resource.model_providers.schema import ChatModelProvider
+
+from autogpt.config.ai_directives import AIDirectives
+
+from .configurators import _configure_agent
+from .profile_generator import generate_agent_profile_for_task
+
+
+async def generate_agent_for_task(
+    task: str,
+    app_config: "Config",
+    llm_provider: "ChatModelProvider",
+) -> "Agent":
+    base_directives = AIDirectives.from_file(app_config.prompt_settings_file)
+    ai_profile, task_directives = await generate_agent_profile_for_task(
+        task=task,
+        app_config=app_config,
+        llm_provider=llm_provider,
+    )
+    return _configure_agent(
+        task=task,
+        ai_profile=ai_profile,
+        directives=base_directives + task_directives,
+        app_config=app_config,
+        llm_provider=llm_provider,
+    )
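Reviewer note: `generate_agent_for_task` is async because it calls the LLM to generate the profile and directives; a usage sketch under the same assumptions as the previous example:

```python
import asyncio

from autogpt.agent_factory.generators import generate_agent_for_task
from autogpt.app.main import _configure_openai_provider
from autogpt.config import ConfigBuilder


async def main():
    app_config = ConfigBuilder.build_config_from_env()
    llm_provider = _configure_openai_provider(app_config)
    agent = await generate_agent_for_task(
        task="Plan a small product launch",  # illustrative task
        app_config=app_config,
        llm_provider=llm_provider,
    )
    # The LLM-generated profile and directives are now part of the agent state.
    print(agent.ai_profile.ai_name, "-", agent.ai_profile.ai_role)


asyncio.run(main())
```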
diff --git a/autogpts/autogpt/autogpt/agent_factory/profile_generator.py b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py
new file mode 100644
index 00000000..a4617c81
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_factory/profile_generator.py
@@ -0,0 +1,222 @@
+import logging
+
+from autogpt.config import AIDirectives, AIProfile, Config
+from autogpt.core.configuration import SystemConfiguration, UserConfigurable
+from autogpt.core.prompting import (
+    ChatPrompt,
+    LanguageModelClassification,
+    PromptStrategy,
+)
+from autogpt.core.prompting.utils import json_loads
+from autogpt.core.resource.model_providers.schema import (
+    AssistantChatMessageDict,
+    ChatMessage,
+    ChatModelProvider,
+    CompletionModelFunction,
+)
+from autogpt.core.utils.json_schema import JSONSchema
+
+logger = logging.getLogger(__name__)
+
+
+class AgentProfileGeneratorConfiguration(SystemConfiguration):
+    model_classification: LanguageModelClassification = UserConfigurable(
+        default=LanguageModelClassification.SMART_MODEL
+    )
+    system_prompt: str = UserConfigurable(
+        default=(
+            "Your job is to respond to a user-defined task, given in triple quotes, by "
+            "invoking the `create_agent` function to generate an autonomous agent to "
+            "complete the task. "
+            "You should supply a role-based name for the agent (_GPT), "
+            "an informative description for what the agent does, and "
+            "1 to 5 directives in each of the categories Best Practices and Constraints, "
+            "that are optimally aligned with the successful completion "
+            "of its assigned task.\n"
+            "\n"
+            "Example Input:\n"
+            '"""Help me with marketing my business"""\n\n'
+            "Example Function Call:\n"
+            "```\n"
+            "{"
+            '"name": "create_agent",'
+            ' "arguments": {'
+            '"name": "CMOGPT",'
+            ' "description": "a professional digital marketer AI that assists Solopreneurs in'
+            " growing their businesses by providing world-class expertise in solving"
+            ' marketing problems for SaaS, content products, agencies, and more.",'
+            ' "directives": {'
+            ' "best_practices": ['
+            '"Engage in effective problem-solving, prioritization, planning, and'
+            " supporting execution to address your marketing needs as your virtual Chief"
+            ' Marketing Officer.",'
+            ' "Provide specific, actionable, and concise advice to help you make'
+            " informed decisions without the use of platitudes or overly wordy"
+            ' explanations.",'
+            ' "Identify and prioritize quick wins and cost-effective campaigns that'
+            ' maximize results with minimal time and budget investment.",'
+            ' "Proactively take the lead in guiding you and offering suggestions when'
+            " faced with unclear information or uncertainty to ensure your marketing"
+            ' strategy remains on track."'
+            "],"  # best_practices
+            ' "constraints": ['
+            '"Do not suggest illegal or unethical plans or strategies.",'
+            ' "Take reasonable budgetary limits into account."'
+            "]"  # constraints
+            "}"  # directives
+            "}"  # arguments
+            "}\n"
+            "```"
+        )
+    )
+    user_prompt_template: str = UserConfigurable(default='"""{user_objective}"""')
+    create_agent_function: dict = UserConfigurable(
+        default=CompletionModelFunction(
+            name="create_agent",
+            description="Create a new autonomous AI agent to complete a given task.",
+            parameters={
+                "name": JSONSchema(
+                    type=JSONSchema.Type.STRING,
+                    description="A short role-based name for an autonomous agent.",
+                    required=True,
+                ),
+                "description": JSONSchema(
+                    type=JSONSchema.Type.STRING,
+                    description="An informative one-sentence description of what the AI agent does.",
+                    required=True,
+                ),
+                "directives": JSONSchema(
+                    type=JSONSchema.Type.OBJECT,
+                    properties={
+                        "best_practices": JSONSchema(
+                            type=JSONSchema.Type.ARRAY,
+                            minItems=1,
+                            maxItems=5,
+                            items=JSONSchema(
+                                type=JSONSchema.Type.STRING,
+                            ),
+                            description=(
+                                "One to five highly effective best practices that are"
+                                " optimally aligned with the completion of the given task."
+                            ),
+                            required=True,
+                        ),
+                        "constraints": JSONSchema(
+                            type=JSONSchema.Type.ARRAY,
+                            minItems=1,
+                            maxItems=5,
+                            items=JSONSchema(
+                                type=JSONSchema.Type.STRING,
+                            ),
+                            description=(
+                                "One to five highly effective constraints that are"
+                                " optimally aligned with the completion of the given task."
+                            ),
+                            required=True,
+                        ),
+                    },
+                    required=True,
+                ),
+            },
+        ).schema
+    )
+
+
+class AgentProfileGenerator(PromptStrategy):
+    default_configuration: AgentProfileGeneratorConfiguration = (
+        AgentProfileGeneratorConfiguration()
+    )
+
+    def __init__(
+        self,
+        model_classification: LanguageModelClassification,
+        system_prompt: str,
+        user_prompt_template: str,
+        create_agent_function: dict,
+    ):
+        self._model_classification = model_classification
+        self._system_prompt_message = system_prompt
+        self._user_prompt_template = user_prompt_template
+        self._create_agent_function = CompletionModelFunction.parse(
+            create_agent_function
+        )
+
+    @property
+    def model_classification(self) -> LanguageModelClassification:
+        return self._model_classification
+
+    def build_prompt(self, user_objective: str = "", **kwargs) -> ChatPrompt:
+        system_message = ChatMessage.system(self._system_prompt_message)
+        user_message = ChatMessage.user(
+            self._user_prompt_template.format(
+                user_objective=user_objective,
+            )
+        )
+        prompt = ChatPrompt(
+            messages=[system_message, user_message],
+            functions=[self._create_agent_function],
+        )
+        return prompt
+
+    def parse_response_content(
+        self,
+        response_content: AssistantChatMessageDict,
+    ) -> tuple[AIProfile, AIDirectives]:
+        """Parse the actual text response from the objective model.
+
+        Args:
+            response_content: The raw response content from the objective model.
+
+        Returns:
+            The parsed response.
+        """
+        try:
+            arguments = json_loads(response_content["function_call"]["arguments"])
+            ai_profile = AIProfile(
+                ai_name=arguments.get("name"),
+                ai_role=arguments.get("description"),
+            )
+            ai_directives = AIDirectives(
+                best_practices=arguments["directives"].get("best_practices"),
+                constraints=arguments["directives"].get("constraints"),
+                resources=[],
+            )
+        except KeyError:
+            logger.debug(f"Failed to parse this response content: {response_content}")
+            raise
+        return ai_profile, ai_directives
+
+
+async def generate_agent_profile_for_task(
+    task: str,
+    app_config: Config,
+    llm_provider: ChatModelProvider,
+) -> tuple[AIProfile, AIDirectives]:
+    """Generates an AIProfile and matching AIDirectives for the given task.
+
+    Returns:
+        tuple[AIProfile, AIDirectives]: The generated profile and directives,
+            tailored to the user's task
+    """
+    agent_profile_generator = AgentProfileGenerator(
+        **AgentProfileGenerator.default_configuration.dict()  # HACK
+    )
+
+    prompt = agent_profile_generator.build_prompt(task)
+
+    # Call LLM with the string as user input
+    output = (
+        await llm_provider.create_chat_completion(
+            prompt.messages,
+            model_name=app_config.smart_llm,
+            functions=prompt.functions,
+        )
+    ).response
+
+    # Debug LLM Output
+    logger.debug(f"AI Config Generator Raw Output: {output}")
+
+    # Parse the output
+    ai_profile, ai_directives = agent_profile_generator.parse_response_content(output)
+
+    return ai_profile, ai_directives
diff --git a/autogpts/autogpt/autogpt/agent_manager/__init__.py b/autogpts/autogpt/autogpt/agent_manager/__init__.py
new file mode 100644
index 00000000..a412566b
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_manager/__init__.py
@@ -0,0 +1,3 @@
+from .agent_manager import AgentManager
+
+__all__ = ["AgentManager"]
diff --git a/autogpts/autogpt/autogpt/agent_manager/agent_manager.py b/autogpts/autogpt/autogpt/agent_manager/agent_manager.py
new file mode 100644
index 00000000..dc3bc646
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agent_manager/agent_manager.py
@@ -0,0 +1,47 @@
+from __future__ import annotations
+
+import uuid
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from autogpt.agents.agent import AgentSettings
+
+from autogpt.agents.utils.agent_file_manager import AgentFileManager
+
+
+class AgentManager:
+    def __init__(self, app_data_dir: Path):
+        self.agents_dir = app_data_dir / "agents"
+        if not self.agents_dir.exists():
+            self.agents_dir.mkdir()
+
+    @staticmethod
+    def generate_id(agent_name: str) -> str:
+        unique_id = str(uuid.uuid4())[:8]
+        return f"{agent_name}-{unique_id}"
+
+    def list_agents(self) -> list[str]:
+        return [
+            dir.name
+            for dir in self.agents_dir.iterdir()
+            if dir.is_dir() and AgentFileManager(dir).state_file_path.exists()
+        ]
+
+    def get_agent_dir(self, agent_id: str, must_exist: bool = False) -> Path:
+        agent_dir = self.agents_dir / agent_id
+        if must_exist and not agent_dir.exists():
+            raise FileNotFoundError(f"No agent with ID '{agent_id}'")
+        return agent_dir
+
+    def retrieve_state(self, agent_id: str) -> AgentSettings:
+        from autogpt.agents.agent import AgentSettings
+
+        agent_dir = self.get_agent_dir(agent_id, True)
+        state_file = AgentFileManager(agent_dir).state_file_path
+        if not state_file.exists():
+            raise FileNotFoundError(f"Agent with ID '{agent_id}' has no state.json")
+
+        state = AgentSettings.load_from_json_file(state_file)
+        state.agent_data_dir = agent_dir
+        return state
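Reviewer note: a sketch of the expected `AgentManager` lifecycle (the data directory is illustrative):

```python
from pathlib import Path

from autogpt.agent_manager import AgentManager

manager = AgentManager(app_data_dir=Path("./data"))  # illustrative location

new_id = AgentManager.generate_id("AutoGPT")  # e.g. "AutoGPT-6f3a1c2e"
agent_dir = manager.get_agent_dir(new_id)     # does not need to exist yet

# Agents only appear in list_agents() once a state.json exists in their dir,
# at which point their settings can be rehydrated:
for agent_id in manager.list_agents():
    state = manager.retrieve_state(agent_id)
    print(agent_id, "->", state.task)
```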
diff --git a/autogpts/autogpt/autogpt/agents/agent.py b/autogpts/autogpt/autogpt/agents/agent.py
index 43d39a6c..b1363105 100644
--- a/autogpts/autogpt/autogpt/agents/agent.py
+++ b/autogpts/autogpt/autogpt/agents/agent.py
@@ -8,10 +8,10 @@ from typing import TYPE_CHECKING, Optional
 
 if TYPE_CHECKING:
     from autogpt.config import Config
-    from autogpt.memory.vector import VectorMemory
     from autogpt.models.command_registry import CommandRegistry
 
-from autogpt.config import AIConfig
+from pydantic import Field
+
 from autogpt.core.configuration import Configurable
 from autogpt.core.prompting import ChatPrompt
 from autogpt.core.resource.model_providers import (
@@ -38,8 +38,8 @@ from autogpt.models.context_item import ContextItem
 
 from .base import BaseAgent, BaseAgentConfiguration, BaseAgentSettings
 from .features.context import ContextMixin
+from .features.file_workspace import FileWorkspaceMixin
 from .features.watchdog import WatchdogMixin
-from .features.workspace import WorkspaceMixin
 from .prompt_strategies.one_shot import (
     OneShotAgentPromptConfiguration,
     OneShotAgentPromptStrategy,
@@ -54,13 +54,17 @@ class AgentConfiguration(BaseAgentConfiguration):
 
 
 class AgentSettings(BaseAgentSettings):
-    config: AgentConfiguration
-    prompt_config: OneShotAgentPromptConfiguration
+    config: AgentConfiguration = Field(default_factory=AgentConfiguration)
+    prompt_config: OneShotAgentPromptConfiguration = Field(
+        default_factory=(
+            lambda: OneShotAgentPromptStrategy.default_configuration.copy(deep=True)
+        )
+    )
 
 
 class Agent(
     ContextMixin,
-    WorkspaceMixin,
+    FileWorkspaceMixin,
     WatchdogMixin,
     BaseAgent,
     Configurable[AgentSettings],
@@ -70,10 +74,6 @@ class Agent(
     default_settings: AgentSettings = AgentSettings(
         name="Agent",
         description=__doc__,
-        ai_config=AIConfig(ai_name="AutoGPT"),
-        config=AgentConfiguration(),
-        prompt_config=OneShotAgentPromptStrategy.default_configuration,
-        history=BaseAgent.default_settings.history,
     )
 
     def __init__(
@@ -81,7 +81,6 @@ class Agent(
         settings: AgentSettings,
         llm_provider: ChatModelProvider,
         command_registry: CommandRegistry,
-        memory: VectorMemory,
         legacy_config: Config,
     ):
         prompt_strategy = OneShotAgentPromptStrategy(
@@ -96,9 +95,6 @@ class Agent(
             legacy_config=legacy_config,
         )
 
-        self.memory = memory
-        """VectorMemoryProvider used to manage the agent's context (TODO)"""
-
         self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
         """Timestamp the agent was created; only used for structured debug logging."""
 
@@ -159,7 +155,7 @@ class Agent(
         self.log_cycle_handler.log_count_within_cycle = 0
         self.log_cycle_handler.log_cycle(
-            self.ai_config.ai_name,
+            self.ai_profile.ai_name,
             self.created_at,
             self.config.cycle_count,
             prompt.raw(),
@@ -184,7 +180,7 @@ class Agent(
         ) = self.prompt_strategy.parse_response_content(llm_response.response)
 
         self.log_cycle_handler.log_cycle(
-            self.ai_config.ai_name,
+            self.ai_profile.ai_name,
             self.created_at,
             self.config.cycle_count,
             assistant_reply_dict,
@@ -212,7 +208,7 @@ class Agent(
         if command_name == "human_feedback":
             result = ActionInterruptedByHuman(feedback=user_input)
             self.log_cycle_handler.log_cycle(
-                self.ai_config.ai_name,
+                self.ai_profile.ai_name,
                 self.created_at,
                 self.config.cycle_count,
                 user_input,
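Reviewer note: with the new `default_factory` fields, an `AgentSettings` can be constructed without spelling out every subsystem config. A sketch, assuming the default prompt_settings file is present (it is loaded by the `BaseAgentSettings.directives` factory):

```python
from autogpt.agents.agent import Agent, AgentSettings

# name/description are the only required SystemSettings fields now;
# config, prompt_config, ai_profile, directives and history all default.
settings = AgentSettings(
    name=Agent.default_settings.name,
    description=Agent.default_settings.description,
    task="Write a haiku about refactoring",  # illustrative
)
assert settings.config.smart_llm           # filled by AgentConfiguration defaults
assert settings.prompt_config is not None  # deep copy of the default strategy config
```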
diff --git a/autogpts/autogpt/autogpt/agents/base.py b/autogpts/autogpt/autogpt/agents/base.py
index c8a0636f..1dd9ac49 100644
--- a/autogpts/autogpt/autogpt/agents/base.py
+++ b/autogpts/autogpt/autogpt/agents/base.py
@@ -2,7 +2,8 @@ from __future__ import annotations
 
 import logging
 from abc import ABC, abstractmethod
-from typing import TYPE_CHECKING, Any, Literal, Optional
+from pathlib import Path
+from typing import TYPE_CHECKING, Any, Optional
 
 from auto_gpt_plugin_template import AutoGPTPluginTemplate
 from pydantic import Field, validator
@@ -18,8 +19,9 @@ if TYPE_CHECKING:
     from autogpt.models.command_registry import CommandRegistry
 
 from autogpt.agents.utils.prompt_scratchpad import PromptScratchpad
-from autogpt.config.ai_config import AIConfig
+from autogpt.config import ConfigBuilder
 from autogpt.config.ai_directives import AIDirectives
+from autogpt.config.ai_profile import AIProfile
 from autogpt.core.configuration import (
     Configurable,
     SystemConfiguration,
@@ -40,6 +42,8 @@ from autogpt.llm.providers.openai import get_openai_command_specs
 from autogpt.models.action_history import ActionResult, EpisodicActionHistory
 from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
 
+from .utils.agent_file_manager import AgentFileManager
+
 logger = logging.getLogger(__name__)
 
 CommandName = str
@@ -48,6 +52,8 @@ AgentThoughts = dict[str, Any]
 
 
 class BaseAgentConfiguration(SystemConfiguration):
+    allow_fs_access: bool = UserConfigurable(default=False)
+
     fast_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT3_16k)
     smart_llm: OpenAIModelName = UserConfigurable(default=OpenAIModelName.GPT4)
     use_functions_api: bool = UserConfigurable(default=False)
@@ -82,9 +88,8 @@ class BaseAgentConfiguration(SystemConfiguration):
         defaults to 75% of `llm.max_tokens`.
         """
 
-    summary_max_tlength: Optional[
-        int
-    ] = None  # TODO: move to ActionHistoryConfiguration
+    summary_max_tlength: Optional[int] = None
+    # TODO: move to ActionHistoryConfiguration
 
     plugins: list[AutoGPTPluginTemplate] = Field(default_factory=list, exclude=True)
 
@@ -115,31 +120,49 @@ class BaseAgentConfiguration(SystemConfiguration):
                 f"Model {smart_llm} does not support OpenAI Functions. "
                 "Please disable OPENAI_FUNCTIONS or choose a suitable model."
             )
+        return v
 
 
 class BaseAgentSettings(SystemSettings):
-    ai_config: AIConfig
-    """The AIConfig or "personality" object associated with this agent."""
+    agent_id: Optional[str] = None
+    agent_data_dir: Optional[Path] = None
 
-    config: BaseAgentConfiguration
+    ai_profile: AIProfile = Field(default_factory=lambda: AIProfile(ai_name="AutoGPT"))
+    """The AI profile or "personality" of the agent."""
+
+    directives: AIDirectives = Field(
+        default_factory=lambda: AIDirectives.from_file(
+            ConfigBuilder.default_settings.prompt_settings_file
+        )
+    )
+    """Directives (general instructional guidelines) for the agent."""
+
+    task: str = "Terminate immediately"  # FIXME: placeholder for forge.sdk.schema.Task
+    """The user-given task that the agent is working on."""
+
+    config: BaseAgentConfiguration = Field(default_factory=BaseAgentConfiguration)
     """The configuration for this BaseAgent subsystem instance."""
 
-    history: EpisodicActionHistory
+    history: EpisodicActionHistory = Field(default_factory=EpisodicActionHistory)
     """(STATE) The action history of the agent."""
 
+    def save_to_json_file(self, file_path: Path) -> None:
+        with file_path.open("w") as f:
+            f.write(self.json())
+
+    @classmethod
+    def load_from_json_file(cls, file_path: Path):
+        return cls.parse_file(file_path)
+
 
 class BaseAgent(Configurable[BaseAgentSettings], ABC):
     """Base class for all AutoGPT agent classes."""
 
-    ThoughtProcessID = Literal["one-shot"]
     ThoughtProcessOutput = tuple[CommandName, CommandArgs, AgentThoughts]
 
     default_settings = BaseAgentSettings(
         name="BaseAgent",
         description=__doc__,
-        ai_config=AIConfig(),
-        config=BaseAgentConfiguration(),
-        history=EpisodicActionHistory(),
     )
 
     def __init__(
@@ -150,8 +173,20 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
         command_registry: CommandRegistry,
         legacy_config: Config,
     ):
-        self.ai_config = settings.ai_config
-        self.ai_directives = AIDirectives.from_file(legacy_config.prompt_settings_file)
+        self.state = settings
+        self.config = settings.config
+        self.ai_profile = settings.ai_profile
+        self.directives = settings.directives
+        self.event_history = settings.history
+
+        self.legacy_config = legacy_config
+        """LEGACY: Monolithic application configuration."""
+
+        self.file_manager: AgentFileManager = (
+            AgentFileManager(settings.agent_data_dir)
+            if settings.agent_data_dir
+            else None
+        )  # type: ignore
 
         self.llm_provider = llm_provider
 
@@ -160,20 +195,27 @@
         self.command_registry = command_registry
         """The registry containing all commands available to the agent."""
 
-        self.llm_provider = llm_provider
-
-        self.legacy_config = legacy_config
-        self.config = settings.config
-        """The applicable application configuration."""
-
-        self.event_history = settings.history
-
         self._prompt_scratchpad: PromptScratchpad | None = None
 
         # Support multi-inheritance and mixins for subclasses
         super(BaseAgent, self).__init__()
 
-        logger.debug(f"Created {__class__} '{self.ai_config.ai_name}'")
+        logger.debug(f"Created {__class__} '{self.ai_profile.ai_name}'")
+
+    def set_id(self, new_id: str, new_agent_dir: Optional[Path] = None):
+        self.state.agent_id = new_id
+        if self.state.agent_data_dir:
+            if not new_agent_dir:
+                raise ValueError(
+                    "new_agent_dir must be specified if one is currently configured"
+                )
+            self.attach_fs(new_agent_dir)
+
+    def attach_fs(self, agent_dir: Path) -> AgentFileManager:
+        self.file_manager = AgentFileManager(agent_dir)
+        self.file_manager.initialize()
+        self.state.agent_data_dir = agent_dir
+        return self.file_manager
 
     @property
     def llm(self) -> ChatModelInfo:
@@ -196,6 +238,10 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
         Returns:
             The command name and arguments, if any, and the agent's thoughts.
         """
+        assert self.file_manager, (
+            f"Agent has no FileManager: call {__class__.__name__}.attach_fs()"
+            " before trying to run the agent."
+        )
 
         # Scratchpad as surrogate PromptGenerator for plugin hooks
         self._prompt_scratchpad = PromptScratchpad()
@@ -266,14 +312,15 @@ class BaseAgent(Configurable[BaseAgentSettings], ABC):
             if not plugin.can_handle_post_prompt():
                 continue
             plugin.post_prompt(scratchpad)
-        ai_directives = self.ai_directives.copy(deep=True)
+        ai_directives = self.directives.copy(deep=True)
         ai_directives.resources += scratchpad.resources
         ai_directives.constraints += scratchpad.constraints
         ai_directives.best_practices += scratchpad.best_practices
         extra_commands += list(scratchpad.commands.values())
 
         prompt = self.prompt_strategy.build_prompt(
-            ai_config=self.ai_config,
+            task=self.state.task,
+            ai_profile=self.ai_profile,
             ai_directives=ai_directives,
             commands=get_openai_command_specs(
                 self.command_registry.list_available_commands(self)
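Reviewer note: the new helpers reduce agent state to a plain JSON round-trip on disk; a sketch (the path is illustrative, in practice this is `AgentFileManager.state_file_path`):

```python
from pathlib import Path

from autogpt.agents.base import BaseAgentSettings

state = BaseAgentSettings(name="BaseAgent", description="demo state")
state_file = Path("/tmp/state.json")  # illustrative location

state.save_to_json_file(state_file)                      # pydantic .json() dump
restored = BaseAgentSettings.load_from_json_file(state_file)  # pydantic parse_file
assert restored.task == state.task == "Terminate immediately"
```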
diff --git a/autogpts/autogpt/autogpt/agents/features/file_workspace.py b/autogpts/autogpt/autogpt/agents/features/file_workspace.py
new file mode 100644
index 00000000..ecdd2874
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/features/file_workspace.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from pathlib import Path
+
+    from ..base import BaseAgent
+
+from autogpt.file_workspace import FileWorkspace
+
+from ..base import AgentFileManager, BaseAgentConfiguration
+
+
+class FileWorkspaceMixin:
+    """Mixin that adds workspace support to a class"""
+
+    workspace: FileWorkspace = None
+    """Workspace that the agent has access to, e.g. for reading/writing files."""
+
+    def __init__(self, **kwargs):
+        # Initialize other bases first, because we need the config from BaseAgent
+        super(FileWorkspaceMixin, self).__init__(**kwargs)
+
+        config: BaseAgentConfiguration = getattr(self, "config")
+        if not isinstance(config, BaseAgentConfiguration):
+            raise ValueError(
+                "Cannot initialize Workspace for Agent without compatible .config"
+            )
+        file_manager: AgentFileManager = getattr(self, "file_manager")
+        if not file_manager:
+            return
+
+        self.workspace = _setup_workspace(file_manager, config)
+
+    def attach_fs(self, agent_dir: Path):
+        res = super(FileWorkspaceMixin, self).attach_fs(agent_dir)
+
+        self.workspace = _setup_workspace(self.file_manager, self.config)
+
+        return res
+
+
+def _setup_workspace(file_manager: AgentFileManager, config: BaseAgentConfiguration):
+    workspace = FileWorkspace(
+        file_manager.root / "workspace",
+        restrict_to_root=not config.allow_fs_access,
+    )
+    workspace.initialize()
+    return workspace
+
+
+def get_agent_workspace(agent: BaseAgent) -> FileWorkspace | None:
+    if isinstance(agent, FileWorkspaceMixin):
+        return agent.workspace
+
+    return None
diff --git a/autogpts/autogpt/autogpt/agents/features/workspace.py b/autogpts/autogpt/autogpt/agents/features/workspace.py
deleted file mode 100644
index 34eceb27..00000000
--- a/autogpts/autogpt/autogpt/agents/features/workspace.py
+++ /dev/null
@@ -1,39 +0,0 @@
-from __future__ import annotations
-
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from ..base import BaseAgent
-
-from autogpt.config import Config
-from autogpt.workspace import Workspace
-
-
-class WorkspaceMixin:
-    """Mixin that adds workspace support to a class"""
-
-    workspace: Workspace
-    """Workspace that the agent has access to, e.g. for reading/writing files."""
-
-    def __init__(self, **kwargs):
-        # Initialize other bases first, because we need the config from BaseAgent
-        super(WorkspaceMixin, self).__init__(**kwargs)
-
-        legacy_config: Config = getattr(self, "legacy_config")
-        if not isinstance(legacy_config, Config):
-            raise ValueError(f"Cannot initialize Workspace for Agent without Config")
-        if not legacy_config.workspace_path:
-            raise ValueError(
-                f"Cannot set up Workspace: no WORKSPACE_PATH in legacy_config"
-            )
-
-        self.workspace = Workspace(
-            legacy_config.workspace_path, legacy_config.restrict_to_workspace
-        )
-
-
-def get_agent_workspace(agent: BaseAgent) -> Workspace | None:
-    if isinstance(agent, WorkspaceMixin):
-        return agent.workspace
-
-    return None
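Reviewer note: the mixin delegates all path handling to `FileWorkspace`; a sketch of the underlying behavior, using only the calls visible in this patch (directory names are illustrative):

```python
from pathlib import Path

from autogpt.file_workspace import FileWorkspace

# In the mixin, this root is file_manager.root / "workspace".
workspace = FileWorkspace(Path("./demo_agent/workspace"), restrict_to_root=True)
workspace.initialize()

workspace.write_file("notes.txt", "hello")  # resolved inside the root
print(workspace.read_file("notes.txt"))
# With restrict_to_root=True (i.e. allow_fs_access disabled), a path like
# "../outside.txt" is expected to be rejected rather than escape the root.
```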
diff --git a/autogpts/autogpt/autogpt/agents/planning_agent.py b/autogpts/autogpt/autogpt/agents/planning_agent.py
index 2d6af385..f68611e3 100644
--- a/autogpts/autogpt/autogpt/agents/planning_agent.py
+++ b/autogpts/autogpt/autogpt/agents/planning_agent.py
@@ -6,7 +6,7 @@ from datetime import datetime
 from typing import TYPE_CHECKING, Literal, Optional
 
 if TYPE_CHECKING:
-    from autogpt.config import AIConfig, Config
+    from autogpt.config import Config
     from autogpt.llm.base import ChatModelResponse, ChatSequence
     from autogpt.memory.vector import VectorMemory
     from autogpt.models.command_registry import CommandRegistry
@@ -32,19 +32,18 @@ from autogpt.models.context_item import ContextItem
 from .agent import execute_command, extract_command
 from .base import BaseAgent
 from .features.context import ContextMixin
-from .features.workspace import WorkspaceMixin
+from .features.file_workspace import FileWorkspaceMixin
 
 logger = logging.getLogger(__name__)
 
 
-class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
+class PlanningAgent(ContextMixin, FileWorkspaceMixin, BaseAgent):
     """Agent class for interacting with AutoGPT."""
 
     ThoughtProcessID = Literal["plan", "action", "evaluate"]
 
     def __init__(
         self,
-        ai_config: AIConfig,
         command_registry: CommandRegistry,
         memory: VectorMemory,
         triggering_prompt: str,
@@ -52,7 +51,6 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
         cycle_budget: Optional[int] = None,
     ):
         super().__init__(
-            ai_config=ai_config,
             command_registry=command_registry,
             config=config,
             default_cycle_instruction=triggering_prompt,
@@ -223,14 +221,14 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
         self.log_cycle_handler.log_count_within_cycle = 0
         self.log_cycle_handler.log_cycle(
-            self.ai_config.ai_name,
+            self.ai_profile.ai_name,
             self.created_at,
             self.cycle_count,
             self.event_history.episodes,
             "event_history.json",
         )
         self.log_cycle_handler.log_cycle(
-            self.ai_config.ai_name,
+            self.ai_profile.ai_name,
             self.created_at,
             self.cycle_count,
             prompt.raw(),
@@ -249,7 +247,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
         if command_name == "human_feedback":
             result = ActionInterruptedByHuman(feedback=user_input)
             self.log_cycle_handler.log_cycle(
-                self.ai_config.ai_name,
+                self.ai_profile.ai_name,
                 self.created_at,
                 self.cycle_count,
                 user_input,
@@ -333,7 +331,7 @@ class PlanningAgent(ContextMixin, WorkspaceMixin, BaseAgent):
         response = command_name, arguments, assistant_reply_dict
 
         self.log_cycle_handler.log_cycle(
-            self.ai_config.ai_name,
+            self.ai_profile.ai_name,
             self.created_at,
             self.cycle_count,
             assistant_reply_dict,
diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
index 506c2c41..30e35a40 100644
--- a/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
+++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/one_shot.py
@@ -7,13 +7,14 @@ from logging import Logger
 from typing import TYPE_CHECKING, Callable, Optional
 
 import distro
+from pydantic import Field
 
 if TYPE_CHECKING:
     from autogpt.agents.agent import Agent
     from autogpt.models.action_history import Episode
 
 from autogpt.agents.utils.exceptions import InvalidAgentResponseError
-from autogpt.config import AIConfig, AIDirectives
+from autogpt.config import AIDirectives, AIProfile
 from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable
 from autogpt.core.prompting import (
     ChatPrompt,
@@ -29,56 +30,6 @@ from autogpt.core.utils.json_schema import JSONSchema
 from autogpt.json_utils.utilities import extract_dict_from_response
 from autogpt.prompts.utils import format_numbered_list, indent
 
-RESPONSE_SCHEMA = JSONSchema(
-    type=JSONSchema.Type.OBJECT,
-    properties={
-        "thoughts": JSONSchema(
-            type=JSONSchema.Type.OBJECT,
-            required=True,
-            properties={
-                "text": JSONSchema(
-                    description="Thoughts",
-                    type=JSONSchema.Type.STRING,
-                    required=True,
-                ),
-                "reasoning": JSONSchema(
-                    type=JSONSchema.Type.STRING,
-                    required=True,
-                ),
-                "plan": JSONSchema(
-                    description="Short markdown-style bullet list that conveys the long-term plan",
-                    type=JSONSchema.Type.STRING,
-                    required=True,
-                ),
-                "criticism": JSONSchema(
-                    description="Constructive self-criticism",
-                    type=JSONSchema.Type.STRING,
-                    required=True,
-                ),
-                "speak": JSONSchema(
-                    description="Summary of thoughts, to say to user",
-                    type=JSONSchema.Type.STRING,
-                    required=True,
-                ),
-            },
-        ),
-        "command": JSONSchema(
-            type=JSONSchema.Type.OBJECT,
-            required=True,
-            properties={
-                "name": JSONSchema(
-                    type=JSONSchema.Type.STRING,
-                    required=True,
-                ),
-                "args": JSONSchema(
-                    type=JSONSchema.Type.OBJECT,
-                    required=True,
-                ),
-            },
-        ),
-    },
-)
-
 
 class OneShotAgentPromptConfiguration(SystemConfiguration):
     DEFAULT_BODY_TEMPLATE: str = (
@@ -166,7 +117,9 @@ class OneShotAgentPromptConfiguration(SystemConfiguration):
     #########
     # State #
     #########
-    progress_summaries: dict[tuple[int, int], str] = {(0, 0): ""}
+    # progress_summaries: dict[tuple[int, int], str] = Field(
+    #     default_factory=lambda: {(0, 0): ""}
+    # )
 
 
 class OneShotAgentPromptStrategy(PromptStrategy):
@@ -190,7 +143,8 @@ class OneShotAgentPromptStrategy(PromptStrategy):
     def build_prompt(
         self,
         *,
-        ai_config: AIConfig,
+        task: str,
+        ai_profile: AIProfile,
         ai_directives: AIDirectives,
         commands: list[CompletionModelFunction],
         event_history: list[Episode],
@@ -213,13 +167,16 @@ class OneShotAgentPromptStrategy(PromptStrategy):
         extra_messages = []
 
         system_prompt = self.build_system_prompt(
-            ai_config=ai_config,
+            ai_profile=ai_profile,
             ai_directives=ai_directives,
             commands=commands,
             include_os_info=include_os_info,
         )
         system_prompt_tlength = count_message_tokens(ChatMessage.system(system_prompt))
 
+        user_task = f'"""{task}"""'
+        user_task_tlength = count_message_tokens(ChatMessage.user(user_task))
+
         response_format_instr = self.response_format_instruction(
             self.config.use_functions_api
         )
@@ -235,6 +192,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
             max_tokens=(
                 max_prompt_tokens
                 - system_prompt_tlength
+                - user_task_tlength
                 - final_instruction_tlength
                 - count_message_tokens(extra_messages)
             ),
@@ -247,6 +205,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
         prompt = ChatPrompt(
             messages=[
                 ChatMessage.system(system_prompt),
+                ChatMessage.user(user_task),
                 *extra_messages,
                 final_instruction_msg,
             ],
@@ -256,26 +215,31 @@ class OneShotAgentPromptStrategy(PromptStrategy):
 
     def build_system_prompt(
         self,
-        ai_config: AIConfig,
+        ai_profile: AIProfile,
         ai_directives: AIDirectives,
         commands: list[CompletionModelFunction],
         include_os_info: bool,
     ) -> str:
         system_prompt_parts = (
-            self._generate_intro_prompt(ai_config)
+            self._generate_intro_prompt(ai_profile)
             + (self._generate_os_info() if include_os_info else [])
             + [
                 self.config.body_template.format(
                     constraints=format_numbered_list(
                         ai_directives.constraints
-                        + self._generate_budget_constraint(ai_config.api_budget)
+                        + self._generate_budget_constraint(ai_profile.api_budget)
                     ),
                     resources=format_numbered_list(ai_directives.resources),
                     commands=self._generate_commands_list(commands),
                     best_practices=format_numbered_list(ai_directives.best_practices),
                 )
             ]
-            + self._generate_goals_info(ai_config.ai_goals)
+            + [
+                "## Your Task\n"
+                "The user will specify a task for you to execute, in triple quotes,"
+                " in the next message. Your job is to complete the task while following"
+                " your directives as given above, and terminate when your task is done."
+            ]
         )
 
         # Join non-empty parts together into paragraph format
@@ -328,7 +292,7 @@ class OneShotAgentPromptStrategy(PromptStrategy):
         return "\n\n".join(steps)
 
     def response_format_instruction(self, use_functions_api: bool) -> str:
-        response_schema = RESPONSE_SCHEMA.copy(deep=True)
+        response_schema = self.response_schema.copy(deep=True)
         if (
             use_functions_api
             and response_schema.properties
@@ -349,14 +313,14 @@ class OneShotAgentPromptStrategy(PromptStrategy):
             f"{response_format}"
         )
 
-    def _generate_intro_prompt(self, ai_config: AIConfig) -> list[str]:
+    def _generate_intro_prompt(self, ai_profile: AIProfile) -> list[str]:
         """Generates the introduction part of the prompt.
 
         Returns:
            list[str]: A list of strings forming the introduction part of the prompt.
         """
         return [
-            f"You are {ai_config.ai_name}, {ai_config.ai_role.rstrip('.')}.",
+            f"You are {ai_profile.ai_name}, {ai_profile.ai_role.rstrip('.')}.",
             "Your decisions must always be made independently without seeking "
             "user assistance. Play to your strengths as an LLM and pursue "
            "simple strategies with no legal complications.",
@@ -392,24 +356,6 @@ class OneShotAgentPromptStrategy(PromptStrategy):
             ]
         return []
 
-    def _generate_goals_info(self, goals: list[str]) -> list[str]:
-        """Generates the goals information part of the prompt.
-
-        Returns:
-            str: The goals information part of the prompt.
-        """
-        if goals:
-            return [
-                "\n".join(
-                    [
-                        "## Goals",
-                        "For your task, you must fulfill the following goals:",
-                        *[f"{i+1}. {goal}" for i, goal in enumerate(goals)],
-                    ]
-                )
-            ]
-        return []
-
     def _generate_commands_list(self, commands: list[CompletionModelFunction]) -> str:
         """Lists the commands available to the agent.
 
@@ -434,7 +380,10 @@ class OneShotAgentPromptStrategy(PromptStrategy):
 
         assistant_reply_dict = extract_dict_from_response(response["content"])
 
-        _, errors = RESPONSE_SCHEMA.validate_object(assistant_reply_dict, self.logger)
+        _, errors = self.response_schema.validate_object(
+            object=assistant_reply_dict,
+            logger=self.logger,
+        )
         if errors:
             raise InvalidAgentResponseError(
                 "Validation of response failed:\n  "
diff --git a/autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py b/autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py
new file mode 100644
index 00000000..4db788bf
--- /dev/null
+++ b/autogpts/autogpt/autogpt/agents/utils/agent_file_manager.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+import logging
+from pathlib import Path
+
+logger = logging.getLogger(__name__)
+
+
+class AgentFileManager:
+    """A class that represents a workspace for an AutoGPT agent."""
+
+    def __init__(self, agent_data_dir: Path):
+        self._root = agent_data_dir.resolve()
+
+    @property
+    def root(self) -> Path:
+        """The root directory of the workspace."""
+        return self._root
+
+    def initialize(self) -> None:
+        self.root.mkdir(exist_ok=True, parents=True)
+        self.init_file_ops_log(self.file_ops_log_path)
+
+    @property
+    def state_file_path(self) -> Path:
+        return self.root / "state.json"
+
+    @property
+    def file_ops_log_path(self) -> Path:
+        return self.root / "file_logger.log"
+
+    @staticmethod
+    def init_file_ops_log(file_logger_path: Path) -> Path:
+        if not file_logger_path.exists():
+            with file_logger_path.open(mode="w", encoding="utf-8") as f:
+                f.write("")
+        return file_logger_path
diff --git a/autogpts/autogpt/autogpt/agents/utils/exceptions.py b/autogpts/autogpt/autogpt/agents/utils/exceptions.py
index d6f2d74e..efb85341 100644
--- a/autogpts/autogpt/autogpt/agents/utils/exceptions.py
+++ b/autogpts/autogpt/autogpt/agents/utils/exceptions.py
@@ -14,6 +14,10 @@ class AgentException(Exception):
         super().__init__(message, *args)
 
 
+class AgentTerminated(AgentException):
+    """The agent terminated or was terminated"""
+
+
 class ConfigurationError(AgentException):
     """Error caused by invalid, incompatible or otherwise incorrect configuration"""
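Reviewer note: `AgentFileManager` is a thin layout convention over one directory per agent; a sketch (the path is illustrative):

```python
from pathlib import Path

from autogpt.agents.utils.agent_file_manager import AgentFileManager

manager = AgentFileManager(Path("./data/agents/AutoGPT-demo"))  # illustrative
manager.initialize()  # creates the directory and an empty file_logger.log

print(manager.root)               # .../data/agents/AutoGPT-demo
print(manager.state_file_path)    # .../state.json
print(manager.file_ops_log_path)  # .../file_logger.log
```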
diff --git a/autogpts/autogpt/autogpt/app/agent_protocol_server.py b/autogpts/autogpt/autogpt/app/agent_protocol_server.py
new file mode 100644
index 00000000..279c2044
--- /dev/null
+++ b/autogpts/autogpt/autogpt/app/agent_protocol_server.py
@@ -0,0 +1,374 @@
+import logging
+import os
+import pathlib
+from io import BytesIO
+from uuid import uuid4
+
+from fastapi import APIRouter, FastAPI, UploadFile
+from fastapi.middleware.cors import CORSMiddleware
+from fastapi.responses import FileResponse, RedirectResponse, StreamingResponse
+from fastapi.staticfiles import StaticFiles
+from forge.sdk.db import AgentDB
+from forge.sdk.errors import NotFoundError
+from forge.sdk.middlewares import AgentMiddleware
+from forge.sdk.routes.agent_protocol import base_router
+from forge.sdk.schema import (
+    Artifact,
+    Step,
+    StepRequestBody,
+    Task,
+    TaskArtifactsListResponse,
+    TaskListResponse,
+    TaskRequestBody,
+    TaskStepsListResponse,
+)
+from hypercorn.asyncio import serve as hypercorn_serve
+from hypercorn.config import Config as HypercornConfig
+
+from autogpt.agent_factory.configurators import configure_agent_with_state
+from autogpt.agent_factory.generators import generate_agent_for_task
+from autogpt.agent_manager import AgentManager
+from autogpt.commands.system import finish
+from autogpt.commands.user_interaction import ask_user
+from autogpt.config import Config
+from autogpt.core.resource.model_providers import ChatModelProvider
+from autogpt.file_workspace import FileWorkspace
+from autogpt.models.action_history import ActionSuccessResult
+
+logger = logging.getLogger(__name__)
+
+
+class AgentProtocolServer:
+    def __init__(
+        self,
+        app_config: Config,
+        database: AgentDB,
+        llm_provider: ChatModelProvider,
+    ):
+        self.app_config = app_config
+        self.db = database
+        self.llm_provider = llm_provider
+        self.agent_manager = AgentManager(app_data_dir=app_config.app_data_dir)
+
+    async def start(self, port: int = 8000, router: APIRouter = base_router):
+        """Start the agent server."""
+        logger.debug("Starting the agent server...")
+        config = HypercornConfig()
+        config.bind = [f"localhost:{port}"]
+        app = FastAPI(
+            title="AutoGPT Server",
+            description="Forked from AutoGPT Forge; modified version of the Agent Protocol.",
+            version="v0.4",
+        )
+
+        # Add CORS middleware
+        origins = [
+            "http://localhost:5000",
+            "http://127.0.0.1:5000",
+            "http://localhost:8000",
+            "http://127.0.0.1:8000",
+            "http://localhost:8080",
+            "http://127.0.0.1:8080",
+            # Add any other origins you want to whitelist
+        ]
+
+        app.add_middleware(
+            CORSMiddleware,
+            allow_origins=origins,
+            allow_credentials=True,
+            allow_methods=["*"],
+            allow_headers=["*"],
+        )
+
+        app.include_router(router, prefix="/ap/v1")
+        script_dir = os.path.dirname(os.path.realpath(__file__))
+        frontend_path = (
+            pathlib.Path(script_dir)
+            .joinpath("../../../../frontend/build/web")
+            .resolve()
+        )
+
+        if os.path.exists(frontend_path):
+            app.mount("/app", StaticFiles(directory=frontend_path), name="app")
+
+            @app.get("/", include_in_schema=False)
+            async def root():
+                return RedirectResponse(url="/app/index.html", status_code=307)
+
+        else:
+            logger.warning(
+                f"Frontend not found. {frontend_path} does not exist. The frontend will not be available."
+            )
+
+        # Used to access the methods on this class from API route handlers
+        app.add_middleware(AgentMiddleware, agent=self)
+
+        config.loglevel = "ERROR"
+        config.bind = [f"0.0.0.0:{port}"]
+
+        logger.info(f"AutoGPT server starting on http://localhost:{port}")
+        await hypercorn_serve(app, config)
+
+    async def create_task(self, task_request: TaskRequestBody) -> Task:
+        """
+        Create a task for the agent.
+        """
+        logger.debug(f"Creating agent for task: '{task_request.input}'")
+        task_agent = await generate_agent_for_task(
+            task=task_request.input,
+            app_config=self.app_config,
+            llm_provider=self.llm_provider,
+        )
+        task = await self.db.create_task(
+            input=task_request.input,
+            additional_input=task_request.additional_input,
+        )
+        agent_id = task_agent.state.agent_id = task_agent_id(task.task_id)
+        logger.debug(f"New agent ID: {agent_id}")
+        task_agent.attach_fs(self.app_config.app_data_dir / "agents" / agent_id)
+        task_agent.state.save_to_json_file(task_agent.file_manager.state_file_path)
+        return task
+
+    async def list_tasks(self, page: int = 1, pageSize: int = 10) -> TaskListResponse:
+        """
+        List all tasks that the agent has created.
+        """
+        logger.debug("Listing all tasks...")
+        tasks, pagination = await self.db.list_tasks(page, pageSize)
+        response = TaskListResponse(tasks=tasks, pagination=pagination)
+        return response
+
+    async def get_task(self, task_id: int) -> Task:
+        """
+        Get a task by ID.
+        """
+        logger.debug(f"Getting task with ID: {task_id}...")
+        task = await self.db.get_task(task_id)
+        return task
+
+    async def list_steps(
+        self, task_id: str, page: int = 1, pageSize: int = 10
+    ) -> TaskStepsListResponse:
+        """
+        List the IDs of all steps that the task has created.
+        """
+        logger.debug(f"Listing all steps created by task with ID: {task_id}...")
+        steps, pagination = await self.db.list_steps(task_id, page, pageSize)
+        response = TaskStepsListResponse(steps=steps, pagination=pagination)
+        return response
+
+    async def execute_step(self, task_id: str, step_request: StepRequestBody) -> Step:
+        """Create a step for the task."""
+        logger.debug(f"Creating a step for task with ID: {task_id}...")
+
+        # Restore Agent instance
+        agent = configure_agent_with_state(
+            state=self.agent_manager.retrieve_state(task_agent_id(task_id)),
+            app_config=self.app_config,
+            llm_provider=self.llm_provider,
+        )
+        agent.workspace.on_write_file = lambda path: self.db.create_artifact(
+            task_id=task_id,
+            file_name=path.parts[-1],
+            relative_path=str(path),
+        )
+
+        # According to the Agent Protocol spec, the first execute_step request contains
+        # the same task input as the parent create_task request.
+        # To prevent this from interfering with the agent's process, we ignore the input
+        # of this first step request, and just generate the first step proposal.
+        is_init_step = not bool(agent.event_history)
+        execute_command, execute_command_args, execute_result = None, None, None
+        execute_approved = False
+        if is_init_step:
+            step_request.input = ""
+        elif (
+            agent.event_history.current_episode
+            and not agent.event_history.current_episode.result
+        ):
+            execute_command = agent.event_history.current_episode.action.name
+            execute_command_args = agent.event_history.current_episode.action.args
+            execute_approved = not step_request.input or step_request.input == "y"
+
+            logger.debug(
+                f"Agent proposed command"
+                f" {execute_command}({fmt_kwargs(execute_command_args)})."
+                f" User input/feedback: {repr(step_request.input)}"
+            )
+
+        # Save step request
+        step = await self.db.create_step(
+            task_id=task_id,
+            input=step_request,
+            is_last=execute_command == finish.__name__ and execute_approved,
+        )
+
+        # Execute previously proposed action
+        if execute_command:
+            assert execute_command_args is not None
+
+            if step.is_last and execute_command == finish.__name__:
+                assert execute_command_args
+                step = await self.db.update_step(
+                    task_id=task_id,
+                    step_id=step.step_id,
+                    output=execute_command_args["reason"],
+                )
+                return step
+
+            if execute_command == ask_user.__name__:  # HACK
+                execute_result = ActionSuccessResult(outputs=step_request.input)
+                agent.event_history.register_result(execute_result)
+            elif execute_approved:
+                step = await self.db.update_step(
+                    task_id=task_id,
+                    step_id=step.step_id,
+                    status="running",
+                )
+                # Execute previously proposed action
+                execute_result = await agent.execute(
+                    command_name=execute_command,
+                    command_args=execute_command_args,
+                )
+            else:
+                assert step_request.input
+                execute_result = await agent.execute(
+                    command_name="human_feedback",  # HACK
+                    command_args={},
+                    user_input=step_request.input,
+                )
+
+        # Propose next action
+        next_command, next_command_args, raw_output = await agent.propose_action()
+
+        # Format step output
+        output = (
+            (
+                f"Command `{execute_command}({fmt_kwargs(execute_command_args)})` returned:"
+                f" {execute_result}\n\n"
+            )
+            if execute_command_args and execute_command != "ask_user"
+            else ""
+        )
+        output += f"{raw_output['thoughts']['speak']}\n\n"
+        output += (
+            f"Next Command: {next_command}({fmt_kwargs(next_command_args)})"
+            if next_command != "ask_user"
+            else next_command_args["question"]
+        )
+
+        additional_output = {
+            **(
+                {
+                    "last_action": {
+                        "name": execute_command,
+                        "args": execute_command_args,
+                        "result": execute_result.dict(),
+                    },
+                }
+                if not is_init_step
+                else {}
+            ),
+            **raw_output,
+        }
+
+        step = await self.db.update_step(
+            task_id=task_id,
+            step_id=step.step_id,
+            status="completed",
+            output=output,
+            additional_output=additional_output,
+        )
+
+        agent.state.save_to_json_file(agent.file_manager.state_file_path)
+        return step
+
+    async def get_step(self, task_id: str, step_id: str) -> Step:
+        """
+        Get a step by ID.
+        """
+        step = await self.db.get_step(task_id, step_id)
+        return step
+
+    async def list_artifacts(
+        self, task_id: str, page: int = 1, pageSize: int = 10
+    ) -> TaskArtifactsListResponse:
+        """
+        List the artifacts that the task has created.
+        """
+        artifacts, pagination = await self.db.list_artifacts(task_id, page, pageSize)
+        return TaskArtifactsListResponse(artifacts=artifacts, pagination=pagination)
+
+    async def create_artifact(
+        self, task_id: str, file: UploadFile, relative_path: str
+    ) -> Artifact:
+        """
+        Create an artifact for the task.
+        """
+        file_name = file.filename or str(uuid4())
+        data = b""
+        while contents := file.file.read(1024 * 1024):
+            data += contents
+        # Check if relative path ends with filename
+        if relative_path.endswith(file_name):
+            file_path = relative_path
+        else:
+            file_path = os.path.join(relative_path, file_name)
+
+        workspace = get_task_agent_file_workspace(task_id, self.agent_manager)
+        workspace.write_file(file_path, data)
+
+        artifact = await self.db.create_artifact(
+            task_id=task_id,
+            file_name=file_name,
+            relative_path=relative_path,
+            agent_created=False,
+        )
+        return artifact
+
+    async def get_artifact(self, task_id: str, artifact_id: str) -> Artifact:
+        """
+        Get an artifact by ID.
+        """
+        try:
+            artifact = await self.db.get_artifact(artifact_id)
+            if artifact.file_name not in artifact.relative_path:
+                file_path = os.path.join(artifact.relative_path, artifact.file_name)
+            else:
+                file_path = artifact.relative_path
+            workspace = get_task_agent_file_workspace(task_id, self.agent_manager)
+            retrieved_artifact = workspace.read_file(file_path, binary=True)
+        except NotFoundError:
+            raise
+        except FileNotFoundError:
+            raise
+
+        return StreamingResponse(
+            BytesIO(retrieved_artifact),
+            media_type="application/octet-stream",
+            headers={
+                "Content-Disposition": f"attachment; filename={artifact.file_name}"
+            },
+        )
+
+
+def task_agent_id(task_id: str | int) -> str:
+    return f"AutoGPT-{task_id}"
+
+
+def get_task_agent_file_workspace(
+    task_id: str | int,
+    agent_manager: AgentManager,
+) -> FileWorkspace:
+    return FileWorkspace(
+        root=agent_manager.get_agent_dir(
+            agent_id=task_agent_id(task_id),
+            must_exist=True,
+        ),
+        restrict_to_root=True,
+    )
+
+
+def fmt_kwargs(kwargs: dict) -> str:
+    return ", ".join(f"{n}={repr(v)}" for n, v in kwargs.items())
@@ -24,6 +33,7 @@ import click @click.option( "--prompt-settings", "-P", + type=click.Path(exists=True, dir_okay=False, path_type=Path), help="Specifies which prompt_settings.yaml file to use.", ) @click.option( @@ -82,18 +92,45 @@ import click help="AI role override", ) @click.option( - "--ai-goal", + "--constraint", type=str, multiple=True, - help="AI goal override; may be used multiple times to pass multiple goals", + help=( + "Add or override AI constraints to include in the prompt;" + " may be used multiple times to pass multiple constraints" + ), ) -@click.pass_context -def main( - ctx: click.Context, +@click.option( + "--resource", + type=str, + multiple=True, + help=( + "Add or override AI resources to include in the prompt;" + " may be used multiple times to pass multiple resources" + ), +) +@click.option( + "--best-practice", + type=str, + multiple=True, + help=( + "Add or override AI best practices to include in the prompt;" + " may be used multiple times to pass multiple best practices" + ), +) +@click.option( + "--override-directives", + is_flag=True, + help=( + "If specified, --constraint, --resource and --best-practice will override" + " the AI's directives instead of being appended to them" + ), +) +def run( continuous: bool, continuous_limit: int, - ai_settings: str, - prompt_settings: str, + ai_settings: Optional[Path], + prompt_settings: Optional[Path], skip_reprompt: bool, speak: bool, debug: bool, @@ -107,41 +144,103 @@ def main( install_plugin_deps: bool, ai_name: Optional[str], ai_role: Optional[str], - ai_goal: tuple[str], + resource: tuple[str], + constraint: tuple[str], + best_practice: tuple[str], + override_directives: bool, ) -> None: """ - Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI. - - Start an AutoGPT assistant. + Sets up and runs an agent, based on the task specified by the user, or resumes an + existing agent. 
""" # Put imports inside function to avoid importing everything when starting the CLI from autogpt.app.main import run_auto_gpt - if ctx.invoked_subcommand is None: - run_auto_gpt( - continuous=continuous, - continuous_limit=continuous_limit, - ai_settings=ai_settings, - prompt_settings=prompt_settings, - skip_reprompt=skip_reprompt, - speak=speak, - debug=debug, - gpt3only=gpt3only, - gpt4only=gpt4only, - memory_type=memory_type, - browser_name=browser_name, - allow_downloads=allow_downloads, - skip_news=skip_news, - working_directory=Path( - __file__ - ).parent.parent.parent, # TODO: make this an option - workspace_directory=workspace_directory, - install_plugin_deps=install_plugin_deps, - ai_name=ai_name, - ai_role=ai_role, - ai_goals=ai_goal, - ) + run_auto_gpt( + continuous=continuous, + continuous_limit=continuous_limit, + ai_settings=ai_settings, + prompt_settings=prompt_settings, + skip_reprompt=skip_reprompt, + speak=speak, + debug=debug, + gpt3only=gpt3only, + gpt4only=gpt4only, + memory_type=memory_type, + browser_name=browser_name, + allow_downloads=allow_downloads, + skip_news=skip_news, + workspace_directory=workspace_directory, + install_plugin_deps=install_plugin_deps, + override_ai_name=ai_name, + override_ai_role=ai_role, + resources=list(resource), + constraints=list(constraint), + best_practices=list(best_practice), + override_directives=override_directives, + ) + + +@cli.command() +@click.option( + "--prompt-settings", + "-P", + type=click.Path(exists=True, dir_okay=False, path_type=Path), + help="Specifies which prompt_settings.yaml file to use.", +) +@click.option("--debug", is_flag=True, help="Enable Debug Mode") +@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode") +@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode") +@click.option( + "--use-memory", + "-m", + "memory_type", + type=str, + help="Defines which Memory backend to use", +) +@click.option( + "-b", + "--browser-name", + help="Specifies which web-browser to use when using selenium to scrape the web.", +) +@click.option( + "--allow-downloads", + is_flag=True, + help="Dangerous: Allows AutoGPT to download files natively.", +) +@click.option( + "--install-plugin-deps", + is_flag=True, + help="Installs external dependencies for 3rd party plugins.", +) +def serve( + prompt_settings: Optional[Path], + debug: bool, + gpt3only: bool, + gpt4only: bool, + memory_type: str, + browser_name: str, + allow_downloads: bool, + install_plugin_deps: bool, +) -> None: + """ + Starts an Agent Protocol compliant AutoGPT server, which creates a custom agent for + every task. 
+ """ + # Put imports inside function to avoid importing everything when starting the CLI + from autogpt.app.main import run_auto_gpt_server + + run_auto_gpt_server( + prompt_settings=prompt_settings, + debug=debug, + gpt3only=gpt3only, + gpt4only=gpt4only, + memory_type=memory_type, + browser_name=browser_name, + allow_downloads=allow_downloads, + install_plugin_deps=install_plugin_deps, + ) if __name__ == "__main__": - main() + cli() diff --git a/autogpts/autogpt/autogpt/app/configurator.py b/autogpts/autogpt/autogpt/app/configurator.py index 171c0b83..54f4b1e3 100644 --- a/autogpts/autogpt/autogpt/app/configurator.py +++ b/autogpts/autogpt/autogpt/app/configurator.py @@ -2,7 +2,8 @@ from __future__ import annotations import logging -from typing import Literal +from pathlib import Path +from typing import Literal, Optional import click from colorama import Back, Fore, Style @@ -17,29 +18,29 @@ from autogpt.memory.vector import get_supported_memory_backends logger = logging.getLogger(__name__) -def create_config( +def apply_overrides_to_config( config: Config, - continuous: bool, - continuous_limit: int, - ai_settings_file: str, - prompt_settings_file: str, - skip_reprompt: bool, - speak: bool, - debug: bool, - gpt3only: bool, - gpt4only: bool, - memory_type: str, - browser_name: str, - allow_downloads: bool, - skip_news: bool, + continuous: bool = False, + continuous_limit: Optional[int] = None, + ai_settings_file: Optional[Path] = None, + prompt_settings_file: Optional[Path] = None, + skip_reprompt: bool = False, + speak: bool = False, + debug: bool = False, + gpt3only: bool = False, + gpt4only: bool = False, + memory_type: str = "", + browser_name: str = "", + allow_downloads: bool = False, + skip_news: bool = False, ) -> None: """Updates the config object with the given arguments. 
Args: continuous (bool): Whether to run in continuous mode continuous_limit (int): The number of times to run in continuous mode - ai_settings_file (str): The path to the ai_settings.yaml file - prompt_settings_file (str): The path to the prompt_settings.yaml file + ai_settings_file (Path): The path to the ai_settings.yaml file + prompt_settings_file (Path): The path to the prompt_settings.yaml file skip_reprompt (bool): Whether to skip the re-prompting messages at the beginning of the script speak (bool): Whether to enable speak mode debug (bool): Whether to enable debug mode @@ -52,7 +53,7 @@ def create_config( """ config.debug_mode = False config.continuous_mode = False - config.speak_mode = False + config.tts_config.speak_mode = False if debug: print_attribute("Debug mode", "ENABLED") @@ -77,7 +78,7 @@ def create_config( if speak: print_attribute("Speak Mode", "ENABLED") - config.speak_mode = True + config.tts_config.speak_mode = True # Set the default LLM models if gpt3only: @@ -130,7 +131,7 @@ def create_config( exit(1) print_attribute("Using AI Settings File", file) - config.ai_settings_file = file + config.ai_settings_file = config.project_root / file config.skip_reprompt = True if prompt_settings_file: @@ -144,7 +145,7 @@ def create_config( exit(1) print_attribute("Using Prompt Settings File", file) - config.prompt_settings_file = file + config.prompt_settings_file = config.project_root / file if browser_name: config.selenium_web_browser = browser_name diff --git a/autogpts/autogpt/autogpt/app/main.py b/autogpts/autogpt/autogpt/app/main.py index b184c29b..c974ae15 100644 --- a/autogpts/autogpt/autogpt/app/main.py +++ b/autogpts/autogpt/autogpt/app/main.py @@ -2,52 +2,59 @@ import enum import logging import math +import re import signal import sys from pathlib import Path from types import FrameType -from typing import Optional +from typing import TYPE_CHECKING, Optional from colorama import Fore, Style +from forge.sdk.db import AgentDB from pydantic import SecretStr +if TYPE_CHECKING: + from autogpt.agents.agent import Agent + +from autogpt.agent_factory.configurators import configure_agent_with_state, create_agent +from autogpt.agent_factory.profile_generator import generate_agent_profile_for_task +from autogpt.agent_manager import AgentManager from autogpt.agents import AgentThoughts, CommandArgs, CommandName -from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings -from autogpt.agents.utils.exceptions import InvalidAgentResponseError -from autogpt.app.configurator import create_config -from autogpt.app.setup import interactive_ai_config_setup -from autogpt.app.spinner import Spinner -from autogpt.app.utils import ( - clean_input, - get_current_git_branch, - get_latest_bulletin, - get_legal_warning, - markdown_to_ansi_style, -) -from autogpt.commands import COMMAND_CATEGORIES -from autogpt.config import AIConfig, Config, ConfigBuilder, check_openai_api_key -from autogpt.core.resource.model_providers import ( - ChatModelProvider, - ModelProviderCredentials, +from autogpt.agents.utils.exceptions import AgentTerminated, InvalidAgentResponseError +from autogpt.config import ( + AIDirectives, + AIProfile, + Config, + ConfigBuilder, + assert_config_has_openai_api_key, ) +from autogpt.core.resource.model_providers import ModelProviderCredentials from autogpt.core.resource.model_providers.openai import OpenAIProvider from autogpt.core.runner.client_lib.utils import coroutine -from autogpt.llm.api_manager import ApiManager from autogpt.logs.config import 
configure_chat_plugins, configure_logging from autogpt.logs.helpers import print_attribute, speak -from autogpt.memory.vector import get_memory -from autogpt.models.command_registry import CommandRegistry from autogpt.plugins import scan_plugins -from autogpt.workspace import Workspace from scripts.install_plugin_deps import install_plugin_dependencies +from .configurator import apply_overrides_to_config +from .setup import apply_overrides_to_ai_settings, interactively_revise_ai_settings +from .spinner import Spinner +from .utils import ( + clean_input, + get_legal_warning, + markdown_to_ansi_style, + print_git_branch_info, + print_motd, + print_python_version_info, +) + @coroutine async def run_auto_gpt( continuous: bool, continuous_limit: int, - ai_settings: str, - prompt_settings: str, + ai_settings: Optional[Path], + prompt_settings: Optional[Path], skip_reprompt: bool, speak: bool, debug: bool, @@ -57,37 +64,43 @@ async def run_auto_gpt( browser_name: str, allow_downloads: bool, skip_news: bool, - working_directory: Path, - workspace_directory: str | Path, + workspace_directory: Path, install_plugin_deps: bool, - ai_name: Optional[str] = None, - ai_role: Optional[str] = None, - ai_goals: tuple[str] = tuple(), + override_ai_name: str = "", + override_ai_role: str = "", + resources: Optional[list[str]] = None, + constraints: Optional[list[str]] = None, + best_practices: Optional[list[str]] = None, + override_directives: bool = False, ): - config = ConfigBuilder.build_config_from_env(workdir=working_directory) + config = ConfigBuilder.build_config_from_env() # TODO: fill in llm values here - check_openai_api_key(config) + assert_config_has_openai_api_key(config) - create_config( - config, - continuous, - continuous_limit, - ai_settings, - prompt_settings, - skip_reprompt, - speak, - debug, - gpt3only, - gpt4only, - memory_type, - browser_name, - allow_downloads, - skip_news, + apply_overrides_to_config( + config=config, + continuous=continuous, + continuous_limit=continuous_limit, + ai_settings_file=ai_settings, + prompt_settings_file=prompt_settings, + skip_reprompt=skip_reprompt, + speak=speak, + debug=debug, + gpt3only=gpt3only, + gpt4only=gpt4only, + memory_type=memory_type, + browser_name=browser_name, + allow_downloads=allow_downloads, + skip_news=skip_news, ) # Set up logging module - configure_logging(config) + configure_logging( + debug_mode=debug, + plain_output=config.plain_output, + tts_config=config.tts_config, + ) llm_provider = _configure_openai_provider(config) @@ -105,102 +118,229 @@ async def run_auto_gpt( ) if not config.skip_news: - motd, is_new_motd = get_latest_bulletin() - if motd: - motd = markdown_to_ansi_style(motd) - for motd_line in motd.split("\n"): - logger.info( - extra={ - "title": "NEWS:", - "title_color": Fore.GREEN, - "preserve_color": True, - }, - msg=motd_line, - ) - if is_new_motd and not config.chat_messages_enabled: - input( - Fore.MAGENTA - + Style.BRIGHT - + "NEWS: Bulletin was updated! Press Enter to continue..." - + Style.RESET_ALL - ) - - git_branch = get_current_git_branch() - if git_branch and git_branch != "stable": - logger.warn( - f"You are running on `{git_branch}` branch" - " - this is not a supported branch." - ) - if sys.version_info < (3, 10): - logger.error( - "WARNING: You are running on an older version of Python. " - "Some people have observed problems with certain " - "parts of AutoGPT with this version. 
" - "Please consider upgrading to Python 3.10 or higher.", - ) + print_motd(config, logger) + print_git_branch_info(logger) + print_python_version_info(logger) if install_plugin_deps: install_plugin_dependencies() - # TODO: have this directory live outside the repository (e.g. in a user's - # home directory) and have it come in as a command line argument or part of - # the env file. - config.workspace_path = Workspace.init_workspace_directory( - config, workspace_directory - ) - - # HACK: doing this here to collect some globals that depend on the workspace. - config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) - config.plugins = scan_plugins(config, config.debug_mode) configure_chat_plugins(config) - # Create a CommandRegistry instance and scan default folder - command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) + # Let user choose an existing agent to run + agent_manager = AgentManager(config.app_data_dir) + existing_agents = agent_manager.list_agents() + load_existing_agent = "" + if existing_agents: + print( + "Existing agents\n---------------\n" + + "\n".join(f"{i} - {id}" for i, id in enumerate(existing_agents, 1)) + ) + load_existing_agent = await clean_input( + config, + "Enter the number or name of the agent to run, or hit enter to create a new one:", + ) + if re.match(r"^\d+$", load_existing_agent): + load_existing_agent = existing_agents[int(load_existing_agent) - 1] + elif load_existing_agent and load_existing_agent not in existing_agents: + raise ValueError(f"Unknown agent '{load_existing_agent}'") - ai_config = await construct_main_ai_config( - config, - llm_provider=llm_provider, - name=ai_name, - role=ai_role, - goals=ai_goals, - ) - # print(prompt) + # Either load existing or set up new agent state + agent = None + agent_state = None - # Initialize memory and make sure it is empty. - # this is particularly important for indexing and referencing pinecone memory - memory = get_memory(config) - memory.clear() - print_attribute("Configured Memory", memory.__class__.__name__) + ############################ + # Resume an Existing Agent # + ############################ + if load_existing_agent: + agent_state = agent_manager.retrieve_state(load_existing_agent) + while True: + answer = await clean_input(config, "Resume? 
[Y/n]") + if answer.lower() == "y": + break + elif answer.lower() == "n": + agent_state = None + break + else: + print("Please respond with 'y' or 'n'") - print_attribute("Configured Browser", config.selenium_web_browser) + if agent_state: + agent = configure_agent_with_state( + state=agent_state, + app_config=config, + llm_provider=llm_provider, + ) + apply_overrides_to_ai_settings( + ai_profile=agent.state.ai_profile, + directives=agent.state.directives, + override_name=override_ai_name, + override_role=override_ai_role, + resources=resources, + constraints=constraints, + best_practices=best_practices, + replace_directives=override_directives, + ) - agent_prompt_config = Agent.default_settings.prompt_config.copy(deep=True) - agent_prompt_config.use_functions_api = config.openai_functions + # If any of these are specified as arguments, + # assume the user doesn't want to revise them + if not any( + [ + override_ai_name, + override_ai_role, + resources, + constraints, + best_practices, + ] + ): + ai_profile, ai_directives = await interactively_revise_ai_settings( + ai_profile=agent.state.ai_profile, + directives=agent.state.directives, + app_config=config, + ) + else: + logger.info("AI config overrides specified through CLI; skipping revision") - agent_settings = AgentSettings( - name=Agent.default_settings.name, - description=Agent.default_settings.description, - ai_config=ai_config, - config=AgentConfiguration( - fast_llm=config.fast_llm, - smart_llm=config.smart_llm, - use_functions_api=config.openai_functions, - plugins=config.plugins, - ), - prompt_config=agent_prompt_config, - history=Agent.default_settings.history.copy(deep=True), + ###################### + # Set up a new Agent # + ###################### + if not agent: + task = await clean_input( + config, + "Enter the task that you want AutoGPT to execute," + " with as much detail as possible:", + ) + base_ai_directives = AIDirectives.from_file(config.prompt_settings_file) + + ai_profile, task_oriented_ai_directives = await generate_agent_profile_for_task( + task, + app_config=config, + llm_provider=llm_provider, + ) + ai_directives = base_ai_directives + task_oriented_ai_directives + apply_overrides_to_ai_settings( + ai_profile=ai_profile, + directives=ai_directives, + override_name=override_ai_name, + override_role=override_ai_role, + resources=resources, + constraints=constraints, + best_practices=best_practices, + replace_directives=override_directives, + ) + + # If any of these are specified as arguments, + # assume the user doesn't want to revise them + if not any( + [ + override_ai_name, + override_ai_role, + resources, + constraints, + best_practices, + ] + ): + ai_profile, ai_directives = await interactively_revise_ai_settings( + ai_profile=ai_profile, + directives=ai_directives, + app_config=config, + ) + else: + logger.info("AI config overrides specified through CLI; skipping revision") + + agent = create_agent( + task=task, + ai_profile=ai_profile, + directives=ai_directives, + app_config=config, + llm_provider=llm_provider, + ) + agent.attach_fs(agent_manager.get_agent_dir(agent.state.agent_id)) + + if not agent.config.allow_fs_access: + logger.info( + f"{Fore.YELLOW}NOTE: All files/directories created by this agent" + f" can be found inside its workspace at:{Fore.RESET} {agent.workspace.root}", + extra={"preserve_color": True}, + ) + + ################# + # Run the Agent # + ################# + try: + await run_interaction_loop(agent) + except AgentTerminated: + agent_id = agent.state.agent_id + logger.info(f"Saving 
state of {agent_id}...") + + # Allow user to Save As other ID + save_as_id = ( + await clean_input( + config, + f"Press enter to save as '{agent_id}', or enter a different ID to save to:", + ) + or agent_id + ) + if save_as_id and save_as_id != agent_id: + agent.set_id( + new_id=save_as_id, + new_agent_dir=agent_manager.get_agent_dir(save_as_id), + ) + # TODO: clone workspace if user wants that + # TODO: ... OR allow many-to-one relations of agents and workspaces + + agent.state.save_to_json_file(agent.file_manager.state_file_path) + + +@coroutine +async def run_auto_gpt_server( + prompt_settings: Optional[Path], + debug: bool, + gpt3only: bool, + gpt4only: bool, + memory_type: str, + browser_name: str, + allow_downloads: bool, + install_plugin_deps: bool, +): + from .agent_protocol_server import AgentProtocolServer + + config = ConfigBuilder.build_config_from_env() + + # TODO: fill in llm values here + assert_config_has_openai_api_key(config) + + apply_overrides_to_config( + config=config, + prompt_settings_file=prompt_settings, + debug=debug, + gpt3only=gpt3only, + gpt4only=gpt4only, + memory_type=memory_type, + browser_name=browser_name, + allow_downloads=allow_downloads, ) - agent = Agent( - settings=agent_settings, - llm_provider=llm_provider, - command_registry=command_registry, - memory=memory, - legacy_config=config, + # Set up logging module + configure_logging( + debug_mode=debug, + plain_output=config.plain_output, + tts_config=config.tts_config, ) - await run_interaction_loop(agent) + llm_provider = _configure_openai_provider(config) + + if install_plugin_deps: + install_plugin_dependencies() + + config.plugins = scan_plugins(config, config.debug_mode) + + # Set up & start server + database = AgentDB("sqlite:///data/ap_server.db", debug_enabled=False) + server = AgentProtocolServer( + app_config=config, database=database, llm_provider=llm_provider + ) + await server.start() def _configure_openai_provider(config: Config) -> OpenAIProvider: @@ -252,7 +392,7 @@ class UserFeedback(str, enum.Enum): async def run_interaction_loop( - agent: Agent, + agent: "Agent", ) -> None: """Run the main interaction loop for the agent. @@ -264,31 +404,42 @@ async def run_interaction_loop( """ # These contain both application config and agent config, so grab them here. legacy_config = agent.legacy_config - ai_config = agent.ai_config + ai_profile = agent.ai_profile logger = logging.getLogger(__name__) cycle_budget = cycles_remaining = _get_cycle_budget( legacy_config.continuous_mode, legacy_config.continuous_limit ) spinner = Spinner("Thinking...", plain_output=legacy_config.plain_output) + stop_reason = None def graceful_agent_interrupt(signum: int, frame: Optional[FrameType]) -> None: - nonlocal cycle_budget, cycles_remaining, spinner - if cycles_remaining in [0, 1]: - logger.error("Interrupt signal received. Stopping AutoGPT immediately.") + nonlocal cycle_budget, cycles_remaining, spinner, stop_reason + if stop_reason: + logger.error("Quitting immediately...") sys.exit() + if cycles_remaining in [0, 1]: + logger.warning("Interrupt signal received: shutting down gracefully.") + logger.warning( + "Press Ctrl+C again if you want to stop AutoGPT immediately." + ) + stop_reason = AgentTerminated("Interrupt signal received") else: restart_spinner = spinner.running if spinner.running: spinner.stop() logger.error( - "Interrupt signal received. Stopping continuous command execution." + "Interrupt signal received: stopping continuous command execution." 
) cycles_remaining = 1 if restart_spinner: spinner.start() + def handle_stop_signal() -> None: + if stop_reason: + raise stop_reason + # Set up an interrupt signal for the agent. signal.signal(signal.SIGINT, graceful_agent_interrupt) @@ -305,6 +456,7 @@ async def run_interaction_loop( ######## # Plan # ######## + handle_stop_signal() # Have the agent determine the next action to take. with spinner: try: @@ -318,10 +470,13 @@ consecutive_failures += 1 if consecutive_failures >= 3: logger.error( - f"The agent failed to output valid thoughts {consecutive_failures} " - "times in a row. Terminating..." + "The agent failed to output valid thoughts" + f" {consecutive_failures} times in a row. Terminating..." + ) + raise AgentTerminated( + "The agent failed to output valid thoughts" + f" {consecutive_failures} times in a row." ) - sys.exit() continue consecutive_failures = 0 @@ -331,16 +486,21 @@ ############### # Print the assistant's thoughts and the next command to the user. update_user( - legacy_config, ai_config, command_name, command_args, assistant_reply_dict + ai_profile, + command_name, + command_args, + assistant_reply_dict, + speak_mode=legacy_config.tts_config.speak_mode, ) ################## # Get user input # ################## + handle_stop_signal() if cycles_remaining == 1: # Last cycle user_feedback, user_input, new_cycles_remaining = await get_user_feedback( legacy_config, - ai_config, + ai_profile, ) if user_feedback == UserFeedback.AUTHORIZE: @@ -394,6 +554,8 @@ if not command_name: continue + handle_stop_signal() + result = await agent.execute(command_name, command_args, user_input) if result.status == "success": @@ -405,26 +567,30 @@ def update_user( - config: Config, - ai_config: AIConfig, + ai_profile: AIProfile, command_name: CommandName, command_args: CommandArgs, assistant_reply_dict: AgentThoughts, + speak_mode: bool = False, ) -> None: """Prints the assistant's thoughts and the next command to the user. Args: - config: The program's configuration. - ai_config: The AI's configuration. + ai_profile: The AI's personality/profile. command_name: The name of the command to execute. command_args: The arguments for the command. assistant_reply_dict: The assistant's reply. + speak_mode: Whether to speak the assistant's thoughts aloud. """ logger = logging.getLogger(__name__) - print_assistant_thoughts(ai_config.ai_name, assistant_reply_dict, config) + print_assistant_thoughts( + ai_name=ai_profile.ai_name, + assistant_reply_json_valid=assistant_reply_dict, + speak_mode=speak_mode, + ) - if config.speak_mode: + if speak_mode: speak(f"I want to execute {command_name}") # First log new-line so user can differentiate sections better in console @@ -442,13 +608,13 @@ def update_user( async def get_user_feedback( config: Config, - ai_config: AIConfig, + ai_profile: AIProfile, ) -> tuple[UserFeedback, str, int | None]: """Gets the user's feedback on the assistant's reply. Args: config: The program's configuration. - ai_config: The AI's configuration. + ai_profile: The AI's profile. Returns: A tuple of the user's feedback, the user's input, and the number of @@ -463,7 +629,7 @@ async def get_user_feedback( f"Enter '{config.authorise_key}' to authorise command, " f"'{config.authorise_key} -N' to run N continuous commands, " f"'{config.exit_key}' to exit program, or enter feedback for " - f"{ai_config.ai_name}..." + f"{ai_profile.ai_name}..."
) user_feedback = None @@ -503,93 +669,10 @@ async def get_user_feedback( return user_feedback, user_input, new_cycles_remaining -async def construct_main_ai_config( - config: Config, - llm_provider: ChatModelProvider, - name: Optional[str] = None, - role: Optional[str] = None, - goals: tuple[str] = tuple(), -) -> AIConfig: - """Construct the prompt for the AI to respond to - - Returns: - str: The prompt string - """ - logger = logging.getLogger(__name__) - - ai_config = AIConfig.load(config.workdir / config.ai_settings_file) - - # Apply overrides - if name: - ai_config.ai_name = name - if role: - ai_config.ai_role = role - if goals: - ai_config.ai_goals = list(goals) - - if ( - all([name, role, goals]) - or config.skip_reprompt - and all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]) - ): - print_attribute("Name :", ai_config.ai_name) - print_attribute("Role :", ai_config.ai_role) - print_attribute("Goals:", ai_config.ai_goals) - print_attribute( - "API Budget:", - "infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}", - ) - elif all([ai_config.ai_name, ai_config.ai_role, ai_config.ai_goals]): - logger.info( - extra={"title": f"{Fore.GREEN}Welcome back!{Fore.RESET}"}, - msg=f"Would you like me to return to being {ai_config.ai_name}?", - ) - should_continue = await clean_input( - config, - f"""Continue with the last settings? -Name: {ai_config.ai_name} -Role: {ai_config.ai_role} -Goals: {ai_config.ai_goals} -API Budget: {"infinite" if ai_config.api_budget <= 0 else f"${ai_config.api_budget}"} -Continue ({config.authorise_key}/{config.exit_key}):""", - ) - if should_continue.lower() == config.exit_key: - ai_config = AIConfig() - - if any([not ai_config.ai_name, not ai_config.ai_role, not ai_config.ai_goals]): - ai_config = await interactive_ai_config_setup(config, llm_provider) - ai_config.save(config.workdir / config.ai_settings_file) - - if config.restrict_to_workspace: - logger.info( - f"{Fore.YELLOW}NOTE: All files/directories created by this agent" - f" can be found inside its workspace at:{Fore.RESET} {config.workspace_path}", - extra={"preserve_color": True}, - ) - # set the total api budget - api_manager = ApiManager() - api_manager.set_total_budget(ai_config.api_budget) - - # Agent Created, print message - logger.info( - f"{Fore.LIGHTBLUE_EX}{ai_config.ai_name}{Fore.RESET} has been created with the following details:", - extra={"preserve_color": True}, - ) - - # Print the ai_config details - print_attribute("Name :", ai_config.ai_name) - print_attribute("Role :", ai_config.ai_role) - print_attribute("Goals:", "") - for goal in ai_config.ai_goals: - logger.info(f"- {goal}") - - return ai_config - - def print_assistant_thoughts( ai_name: str, assistant_reply_json_valid: dict, - config: Config, + speak_mode: bool = False, ) -> None: logger = logging.getLogger(__name__) @@ -634,7 +717,7 @@ def print_assistant_thoughts( # Speak the assistant's thoughts if assistant_thoughts_speak: - if config.speak_mode: + if speak_mode: speak(assistant_thoughts_speak) else: print_attribute("SPEAK", assistant_thoughts_speak, title_color=Fore.YELLOW) diff --git a/autogpts/autogpt/autogpt/app/setup.py b/autogpts/autogpt/autogpt/app/setup.py index 06cead2e..6410fc9b 100644 --- a/autogpts/autogpt/autogpt/app/setup.py +++ b/autogpts/autogpt/autogpt/app/setup.py @@ -1,253 +1,190 @@ """Set up the AI and its goals""" import logging -import re from typing import Optional -from colorama import Fore, Style -from jinja2 import Template - -from autogpt.app import utils -from 
autogpt.config import Config -from autogpt.config.ai_config import AIConfig -from autogpt.core.resource.model_providers import ChatMessage, ChatModelProvider -from autogpt.logs.helpers import user_friendly_output -from autogpt.prompts.default_prompts import ( - DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC, - DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC, - DEFAULT_USER_DESIRE_PROMPT, -) +from autogpt.app.utils import clean_input +from autogpt.config import AIDirectives, AIProfile, Config +from autogpt.logs.helpers import print_attribute logger = logging.getLogger(__name__) -async def interactive_ai_config_setup( - config: Config, - llm_provider: ChatModelProvider, - ai_config_template: Optional[AIConfig] = None, -) -> AIConfig: - """Prompt the user for input +def apply_overrides_to_ai_settings( + ai_profile: AIProfile, + directives: AIDirectives, + override_name: str = "", + override_role: str = "", + replace_directives: bool = False, + resources: Optional[list[str]] = None, + constraints: Optional[list[str]] = None, + best_practices: Optional[list[str]] = None, +): + if override_name: + ai_profile.ai_name = override_name + if override_role: + ai_profile.ai_role = override_role - Params: - config (Config): The Config object - ai_config_template (AIConfig): The AIConfig object to use as a template + if replace_directives: + if resources: + directives.resources = resources + if constraints: + directives.constraints = constraints + if best_practices: + directives.best_practices = best_practices + else: + if resources: + directives.resources += resources + if constraints: + directives.constraints += constraints + if best_practices: + directives.best_practices += best_practices + + +async def interactively_revise_ai_settings( + ai_profile: AIProfile, + directives: AIDirectives, + app_config: Config, +): + """Interactively revise the AI settings. + + Args: + ai_profile (AIProfile): The current AI profile. + directives (AIDirectives): The current AI directives. + app_config (Config): The application configuration. Returns: - AIConfig: The AIConfig object tailored to the user's input + tuple[AIProfile, AIDirectives]: The revised AI settings. """ + logger = logging.getLogger("revise_ai_profile") - # Construct the prompt - user_friendly_output( - title="Welcome to AutoGPT! ", - message="run with '--help' for more information.", - title_color=Fore.GREEN, - ) + revised = False - ai_config_template_provided = ai_config_template is not None and any( - [ - ai_config_template.ai_goals, - ai_config_template.ai_name, - ai_config_template.ai_role, - ] - ) - - user_desire = "" - if not ai_config_template_provided: - # Get user desire if command line overrides have not been passed in - user_friendly_output( - title="Create an AI-Assistant:", - message="input '--manual' to enter manual mode.", - title_color=Fore.GREEN, + while True: + # Print the current AI configuration + print_ai_settings( + title="Current AI Settings" if not revised else "Revised AI Settings", + ai_profile=ai_profile, + directives=directives, + logger=logger, ) - user_desire = await utils.clean_input( - config, f"{Fore.LIGHTBLUE_EX}I want AutoGPT to{Style.RESET_ALL}:" - ) + if ( + await clean_input(app_config, "Continue with these settings? 
[Y/n]") + or app_config.authorise_key + ) == app_config.authorise_key: + break - if user_desire.strip() == "": - user_desire = DEFAULT_USER_DESIRE_PROMPT # Default prompt - - # If user desire contains "--manual" or we have overridden any of the AI configuration - if "--manual" in user_desire or ai_config_template_provided: - user_friendly_output( - "", - title="Manual Mode Selected", - title_color=Fore.GREEN, - ) - return await generate_aiconfig_manual(config, ai_config_template) - - else: - try: - return await generate_aiconfig_automatic(user_desire, config, llm_provider) - except Exception as e: - user_friendly_output( - title="Unable to automatically generate AI Config based on user desire.", - message="Falling back to manual mode.", - title_color=Fore.RED, + # Ask for revised ai_profile + ai_profile.ai_name = ( + await clean_input( + app_config, "Enter AI name (or press enter to keep current):" ) - logger.debug(f"Error during AIConfig generation: {e}") - - return await generate_aiconfig_manual(config) - - -async def generate_aiconfig_manual( - config: Config, ai_config_template: Optional[AIConfig] = None -) -> AIConfig: - """ - Interactively create an AI configuration by prompting the user to provide the name, role, and goals of the AI. - - This function guides the user through a series of prompts to collect the necessary information to create - an AIConfig object. The user will be asked to provide a name and role for the AI, as well as up to five - goals. If the user does not provide a value for any of the fields, default values will be used. - - Params: - config (Config): The Config object - ai_config_template (AIConfig): The AIConfig object to use as a template - - Returns: - AIConfig: An AIConfig object containing the user-defined or default AI name, role, and goals. - """ - - # Manual Setup Intro - user_friendly_output( - title="Create an AI-Assistant:", - message="Enter the name of your AI and its role below. Entering nothing will load" - " defaults.", - title_color=Fore.GREEN, - ) - - if ai_config_template and ai_config_template.ai_name: - ai_name = ai_config_template.ai_name - else: - ai_name = "" - # Get AI Name from User - user_friendly_output( - title="Name your AI:", - message="For example, 'Entrepreneur-GPT'", - title_color=Fore.GREEN, + or ai_profile.ai_name ) - ai_name = await utils.clean_input(config, "AI Name:") - if ai_name == "": - ai_name = "Entrepreneur-GPT" - - user_friendly_output( - title=f"{ai_name} here!", - message="I am at your service.", - title_color=Fore.LIGHTBLUE_EX, - ) - - if ai_config_template and ai_config_template.ai_role: - ai_role = ai_config_template.ai_role - else: - # Get AI Role from User - user_friendly_output( - title="Describe your AI's role:", - message="For example, 'an AI designed to autonomously develop and run businesses with" - " the sole goal of increasing your net worth.'", - title_color=Fore.GREEN, - ) - ai_role = await utils.clean_input(config, f"{ai_name} is:") - if ai_role == "": - ai_role = "an AI designed to autonomously develop and run businesses with the" - " sole goal of increasing your net worth." 
- - if ai_config_template and ai_config_template.ai_goals: - ai_goals = ai_config_template.ai_goals - else: - # Enter up to 5 goals for the AI - user_friendly_output( - title="Enter up to 5 goals for your AI:", - message="For example: \nIncrease net worth, Grow Twitter Account, Develop and manage" - " multiple businesses autonomously'", - title_color=Fore.GREEN, - ) - logger.info("Enter nothing to load defaults, enter nothing when finished.") - ai_goals = [] - for i in range(5): - ai_goal = await utils.clean_input( - config, f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}:" + ai_profile.ai_role = ( + await clean_input( + app_config, "Enter new AI role (or press enter to keep current):" ) - if ai_goal == "": + or ai_profile.ai_role + ) + + # Revise constraints + for i, constraint in enumerate(directives.constraints): + print_attribute(f"Constraint {i+1}:", f'"{constraint}"') + new_constraint = ( + await clean_input( + app_config, + f"Enter new constraint {i+1} (press enter to keep current, or '-' to remove):", + ) + or constraint + ) + if new_constraint == "-": + directives.constraints.remove(constraint) + elif new_constraint: + directives.constraints[i] = new_constraint + + # Add new constraints + while True: + new_constraint = await clean_input( + app_config, + "Press enter to finish, or enter a constraint to add:", + ) + if not new_constraint: break - ai_goals.append(ai_goal) - if not ai_goals: - ai_goals = [ - "Increase net worth", - "Grow Twitter Account", - "Develop and manage multiple businesses autonomously", - ] + directives.constraints.append(new_constraint) - # Get API Budget from User - user_friendly_output( - title="Enter your budget for API calls:", - message="For example: $1.50", - title_color=Fore.GREEN, - ) - logger.info("Enter nothing to let the AI run without monetary limit") - api_budget_input = await utils.clean_input( - config, f"{Fore.LIGHTBLUE_EX}Budget ($){Style.RESET_ALL}:" - ) - if api_budget_input == "": - api_budget = 0.0 - else: - try: - api_budget = float(api_budget_input.replace("$", "")) - except ValueError: - user_friendly_output( - level=logging.WARNING, - title="Invalid budget input.", - message="Setting budget to unlimited.", - title_color=Fore.RED, + # Revise resources + for i, resource in enumerate(directives.resources): + print_attribute(f"Resource {i+1}:", f'"{resource}"') + new_resource = ( + await clean_input( + app_config, + f"Enter new resource {i+1} (press enter to keep current, or '-' to remove):", + ) + or resource ) - api_budget = 0.0 + if new_resource == "-": + directives.resources.remove(resource) + elif new_resource: + directives.resources[i] = new_resource - return AIConfig( - ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget - ) + # Add new resources + while True: + new_resource = await clean_input( + app_config, + "Press enter to finish, or enter a resource to add:", + ) + if not new_resource: + break + directives.resources.append(new_resource) + + # Revise best practices + for i, best_practice in enumerate(directives.best_practices): + print_attribute(f"Best Practice {i+1}:", f'"{best_practice}"') + new_best_practice = ( + await clean_input( + app_config, + f"Enter new best practice {i+1} (press enter to keep current, or '-' to remove):", + ) + or best_practice + ) + if new_best_practice == "-": + directives.best_practices.remove(best_practice) + elif new_best_practice: + directives.best_practices[i] = new_best_practice + + # Add new best practices + while True: + new_best_practice = await clean_input( + 
app_config, + "Press enter to finish, or enter a best practice to add:", + ) + if not new_best_practice: + break + directives.best_practices.append(new_best_practice) + + revised = True + + return ai_profile, directives -async def generate_aiconfig_automatic( - user_prompt: str, - config: Config, - llm_provider: ChatModelProvider, -) -> AIConfig: - """Generates an AIConfig object from the given string. +def print_ai_settings( + ai_profile: AIProfile, + directives: AIDirectives, + logger: logging.Logger, + title: str = "AI Settings", +): + print_attribute(title, "") + print_attribute("-" * len(title), "") + print_attribute("Name :", ai_profile.ai_name) + print_attribute("Role :", ai_profile.ai_role) - Returns: - AIConfig: The AIConfig object tailored to the user's input - """ - - system_prompt = DEFAULT_SYSTEM_PROMPT_AICONFIG_AUTOMATIC - prompt_ai_config_automatic = Template( - DEFAULT_TASK_PROMPT_AICONFIG_AUTOMATIC - ).render(user_prompt=user_prompt) - # Call LLM with the string as user input - output = ( - await llm_provider.create_chat_completion( - [ - ChatMessage.system(system_prompt), - ChatMessage.user(prompt_ai_config_automatic), - ], - config.smart_llm, - ) - ).response["content"] - - # Debug LLM Output - logger.debug(f"AI Config Generator Raw Output: {output}") - - # Parse the output - ai_name = re.search(r"Name(?:\s*):(?:\s*)(.*)", output, re.IGNORECASE).group(1) - ai_role = ( - re.search( - r"Description(?:\s*):(?:\s*)(.*?)(?:(?:\n)|Goals)", - output, - re.IGNORECASE | re.DOTALL, - ) - .group(1) - .strip() - ) - ai_goals = re.findall(r"(?<=\n)-\s*(.*)", output) - api_budget = 0.0 # TODO: parse api budget using a regular expression - - return AIConfig( - ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget - ) + print_attribute("Constraints:", "" if directives.constraints else "(none)") + for constraint in directives.constraints: + logger.info(f"- {constraint}") + print_attribute("Resources:", "" if directives.resources else "(none)") + for resource in directives.resources: + logger.info(f"- {resource}") + print_attribute("Best practices:", "" if directives.best_practices else "(none)") + for best_practice in directives.best_practices: + logger.info(f"- {best_practice}") diff --git a/autogpts/autogpt/autogpt/app/utils.py b/autogpts/autogpt/autogpt/app/utils.py index cfa00ff8..30ee7f27 100644 --- a/autogpts/autogpt/autogpt/app/utils.py +++ b/autogpts/autogpt/autogpt/app/utils.py @@ -1,6 +1,7 @@ import logging import os import re +import sys import requests from colorama import Fore, Style @@ -146,3 +147,44 @@ behalf. You acknowledge that using the System could expose you to potential liab By using the System, you agree to indemnify, defend, and hold harmless the Project Parties from and against any and all claims, liabilities, damages, losses, or expenses (including reasonable attorneys' fees and costs) arising out of or in connection with your use of the System, including, without limitation, any actions taken by the System on your behalf, any failure to properly supervise or monitor the System, and any resulting harm or unintended consequences. 
""" return legal_text + + +def print_motd(config: Config, logger: logging.Logger): + motd, is_new_motd = get_latest_bulletin() + if motd: + motd = markdown_to_ansi_style(motd) + for motd_line in motd.split("\n"): + logger.info( + extra={ + "title": "NEWS:", + "title_color": Fore.GREEN, + "preserve_color": True, + }, + msg=motd_line, + ) + if is_new_motd and not config.chat_messages_enabled: + input( + Fore.MAGENTA + + Style.BRIGHT + + "NEWS: Bulletin was updated! Press Enter to continue..." + + Style.RESET_ALL + ) + + +def print_git_branch_info(logger: logging.Logger): + git_branch = get_current_git_branch() + if git_branch and git_branch != "stable": + logger.warn( + f"You are running on `{git_branch}` branch" + " - this is not a supported branch." + ) + + +def print_python_version_info(logger: logging.Logger): + if sys.version_info < (3, 10): + logger.error( + "WARNING: You are running on an older version of Python. " + "Some people have observed problems with certain " + "parts of AutoGPT with this version. " + "Please consider upgrading to Python 3.10 or higher.", + ) diff --git a/autogpts/autogpt/autogpt/commands/execute_code.py b/autogpts/autogpt/autogpt/commands/execute_code.py index 5e05efe0..f9d55885 100644 --- a/autogpts/autogpt/autogpt/commands/execute_code.py +++ b/autogpts/autogpt/autogpt/commands/execute_code.py @@ -102,7 +102,7 @@ def execute_python_file( str: The output of the file """ logger.info( - f"Executing python file '{filename}' in working directory '{agent.legacy_config.workspace_path}'" + f"Executing python file '{filename}' in working directory '{agent.workspace.root}'" ) if isinstance(args, str): diff --git a/autogpts/autogpt/autogpt/commands/file_operations.py b/autogpts/autogpt/autogpt/commands/file_operations.py index 2a4bb784..8e3b54ab 100644 --- a/autogpts/autogpt/autogpt/commands/file_operations.py +++ b/autogpts/autogpt/autogpt/commands/file_operations.py @@ -62,15 +62,15 @@ def operations_from_log( def file_operations_state(log_path: str | Path) -> dict[str, str]: """Iterates over the operations log and returns the expected state. - Parses a log file at config.file_logger_path to construct a dictionary that maps - each file path written or appended to its checksum. Deleted files are removed - from the dictionary. + Parses a log file at file_manager.file_ops_log_path to construct a dictionary + that maps each file path written or appended to its checksum. Deleted files are + removed from the dictionary. Returns: A dictionary mapping file paths to their checksums. Raises: - FileNotFoundError: If config.file_logger_path is not found. + FileNotFoundError: If file_manager.file_ops_log_path is not found. ValueError: If the log file content is not in the expected format. 
""" state = {} @@ -101,7 +101,7 @@ def is_duplicate_operation( with contextlib.suppress(ValueError): file_path = file_path.relative_to(agent.workspace.root) - state = file_operations_state(agent.legacy_config.file_logger_path) + state = file_operations_state(agent.file_manager.file_ops_log_path) if operation == "delete" and str(file_path) not in state: return True if operation == "write" and state.get(str(file_path)) == checksum: @@ -129,7 +129,7 @@ def log_operation( log_entry += f" #{checksum}" logger.debug(f"Logging file operation: {log_entry}") append_to_file( - agent.legacy_config.file_logger_path, f"{log_entry}\n", agent, should_log=False + agent.file_manager.file_ops_log_path, f"{log_entry}\n", agent, should_log=False ) @@ -155,6 +155,7 @@ def read_file(filename: Path, agent: Agent) -> str: str: The contents of the file """ content = read_textual_file(filename, logger) + # TODO: content = agent.workspace.read_file(filename) # # TODO: invalidate/update memory when file is edited # file_memory = MemoryItem.from_text_file(content, str(filename), agent.config) @@ -224,13 +225,11 @@ def write_to_file(filename: Path, contents: str, agent: Agent) -> str: directory = os.path.dirname(filename) os.makedirs(directory, exist_ok=True) - with open(filename, "w", encoding="utf-8") as f: - f.write(contents) + agent.workspace.write_file(filename, contents) log_operation("write", filename, agent, checksum) return f"File {filename.name} has been written successfully." -@sanitize_path_arg("filename") def append_to_file( filename: Path, text: str, agent: Agent, should_log: bool = True ) -> None: @@ -243,11 +242,11 @@ def append_to_file( """ directory = os.path.dirname(filename) os.makedirs(directory, exist_ok=True) - with open(filename, "a", encoding="utf-8") as f: + with open(filename, "a") as f: f.write(text) if should_log: - with open(filename, "r", encoding="utf-8") as f: + with open(filename, "r") as f: checksum = text_checksum(f.read()) log_operation("append", filename, agent, checksum=checksum) @@ -280,7 +279,7 @@ def list_folder(folder: Path, agent: Agent) -> list[str]: if file.startswith("."): continue relative_path = os.path.relpath( - os.path.join(root, file), agent.legacy_config.workspace_path + os.path.join(root, file), agent.workspace.root ) found_files.append(relative_path) diff --git a/autogpts/autogpt/autogpt/commands/image_gen.py b/autogpts/autogpt/autogpt/commands/image_gen.py index 33d18956..ba771635 100644 --- a/autogpts/autogpt/autogpt/commands/image_gen.py +++ b/autogpts/autogpt/autogpt/commands/image_gen.py @@ -44,7 +44,7 @@ def generate_image(prompt: str, agent: Agent, size: int = 256) -> str: Returns: str: The filename of the image """ - filename = agent.legacy_config.workspace_path / f"{str(uuid.uuid4())}.jpg" + filename = agent.workspace.root / f"{str(uuid.uuid4())}.jpg" # DALL-E if agent.legacy_config.image_provider == "dalle": diff --git a/autogpts/autogpt/autogpt/commands/system.py b/autogpts/autogpt/autogpt/commands/system.py index 8b143545..d1a879c1 100644 --- a/autogpts/autogpt/autogpt/commands/system.py +++ b/autogpts/autogpt/autogpt/commands/system.py @@ -12,7 +12,7 @@ if TYPE_CHECKING: from autogpt.agents.agent import Agent from autogpt.agents.features.context import get_agent_context -from autogpt.agents.utils.exceptions import InvalidArgumentError +from autogpt.agents.utils.exceptions import AgentTerminated, InvalidArgumentError from autogpt.command_decorator import command from autogpt.core.utils.json_schema import JSONSchema @@ -42,8 +42,7 @@ def finish(reason: 
str, agent: Agent) -> None: A result string from create chat completion. A list of suggestions to improve the code. """ - logger.info(reason, extra={"title": "Shutting down...\n"}) - quit() + raise AgentTerminated(reason) @command( diff --git a/autogpts/autogpt/autogpt/commands/web_search.py b/autogpts/autogpt/autogpt/commands/web_search.py index de77d5d1..8c9a9334 100644 --- a/autogpts/autogpt/autogpt/commands/web_search.py +++ b/autogpts/autogpt/autogpt/commands/web_search.py @@ -58,7 +58,11 @@ def web_search(query: str, agent: Agent, num_results: int = 8) -> str: attempts += 1 search_results = [ - {"title": r["title"], "url": r["href"], "description": r["body"]} + { + "title": r["title"], + "url": r["href"], + **({"description": r["body"]} if r.get("body") else {}), + } for r in search_results ] diff --git a/autogpts/autogpt/autogpt/config/__init__.py b/autogpts/autogpt/autogpt/config/__init__.py index 12dbc4f1..e0c11339 100644 --- a/autogpts/autogpt/autogpt/config/__init__.py +++ b/autogpts/autogpt/autogpt/config/__init__.py @@ -1,13 +1,13 @@ """ This module contains the configuration classes for AutoGPT. """ -from .ai_config import AIConfig from .ai_directives import AIDirectives -from .config import Config, ConfigBuilder, check_openai_api_key +from .ai_profile import AIProfile +from .config import Config, ConfigBuilder, assert_config_has_openai_api_key __all__ = [ - "check_openai_api_key", - "AIConfig", + "assert_config_has_openai_api_key", + "AIProfile", "AIDirectives", "Config", "ConfigBuilder", diff --git a/autogpts/autogpt/autogpt/config/ai_directives.py b/autogpts/autogpt/autogpt/config/ai_directives.py index 38f169be..6b5aa437 100644 --- a/autogpts/autogpt/autogpt/config/ai_directives.py +++ b/autogpts/autogpt/autogpt/config/ai_directives.py @@ -1,9 +1,8 @@ -from __future__ import annotations - import logging +from pathlib import Path import yaml -from pydantic import BaseModel +from pydantic import BaseModel, Field from autogpt.logs.helpers import request_user_double_check from autogpt.utils import validate_yaml_file @@ -20,17 +19,17 @@ class AIDirectives(BaseModel): best_practices (list): A list of best practices that the AI should follow. 
""" - constraints: list[str] - resources: list[str] - best_practices: list[str] + resources: list[str] = Field(default_factory=list) + constraints: list[str] = Field(default_factory=list) + best_practices: list[str] = Field(default_factory=list) @staticmethod - def from_file(prompt_settings_file: str) -> AIDirectives: + def from_file(prompt_settings_file: Path) -> "AIDirectives": (validated, message) = validate_yaml_file(prompt_settings_file) if not validated: logger.error(message, extra={"title": "FAILED FILE VALIDATION"}) request_user_double_check() - exit(1) + raise RuntimeError(f"File validation failed: {message}") with open(prompt_settings_file, encoding="utf-8") as file: config_params = yaml.load(file, Loader=yaml.FullLoader) @@ -40,3 +39,10 @@ class AIDirectives(BaseModel): resources=config_params.get("resources", []), best_practices=config_params.get("best_practices", []), ) + + def __add__(self, other: "AIDirectives") -> "AIDirectives": + return AIDirectives( + resources=self.resources + other.resources, + constraints=self.constraints + other.constraints, + best_practices=self.best_practices + other.best_practices, + ).copy(deep=True) diff --git a/autogpts/autogpt/autogpt/config/ai_config.py b/autogpts/autogpt/autogpt/config/ai_profile.py similarity index 86% rename from autogpts/autogpt/autogpt/config/ai_config.py rename to autogpts/autogpt/autogpt/config/ai_profile.py index 1cf16687..ac4e3e59 100644 --- a/autogpts/autogpt/autogpt/config/ai_config.py +++ b/autogpts/autogpt/autogpt/config/ai_profile.py @@ -1,15 +1,12 @@ -"""A module that contains the AIConfig class object that contains the configuration""" -from __future__ import annotations - from pathlib import Path import yaml from pydantic import BaseModel, Field -class AIConfig(BaseModel): +class AIProfile(BaseModel): """ - A class object that contains the configuration information for the AI + Object to hold the AI's personality. Attributes: ai_name (str): The name of the AI. @@ -24,7 +21,7 @@ class AIConfig(BaseModel): api_budget: float = 0.0 @staticmethod - def load(ai_settings_file: str | Path) -> "AIConfig": + def load(ai_settings_file: str | Path) -> "AIProfile": """ Returns class object with parameters (ai_name, ai_role, ai_goals, api_budget) loaded from yaml file if yaml file exists, else returns class with no parameters. 
@@ -52,7 +49,7 @@ class AIConfig(BaseModel): ] api_budget = config_params.get("api_budget", 0.0) - return AIConfig( + return AIProfile( ai_name=ai_name, ai_role=ai_role, ai_goals=ai_goals, api_budget=api_budget ) diff --git a/autogpts/autogpt/autogpt/config/config.py b/autogpts/autogpt/autogpt/config/config.py index 3fb371da..5436a670 100644 --- a/autogpts/autogpt/autogpt/config/config.py +++ b/autogpts/autogpt/autogpt/config/config.py @@ -12,14 +12,17 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate from colorama import Fore from pydantic import Field, validator +import autogpt from autogpt.core.configuration.schema import Configurable, SystemSettings from autogpt.core.resource.model_providers.openai import OPEN_AI_CHAT_MODELS from autogpt.plugins.plugins_config import PluginsConfig +from autogpt.speech import TTSConfig -AI_SETTINGS_FILE = "ai_settings.yaml" -AZURE_CONFIG_FILE = "azure.yaml" -PLUGINS_CONFIG_FILE = "plugins_config.yaml" -PROMPT_SETTINGS_FILE = "prompt_settings.yaml" +PROJECT_ROOT = Path(autogpt.__file__).parent.parent +AI_SETTINGS_FILE = Path("ai_settings.yaml") +AZURE_CONFIG_FILE = Path("azure.yaml") +PLUGINS_CONFIG_FILE = Path("plugins_config.yaml") +PROMPT_SETTINGS_FILE = Path("prompt_settings.yaml") GPT_4_MODEL = "gpt-4" GPT_3_MODEL = "gpt-3.5-turbo" @@ -31,6 +34,8 @@ class Config(SystemSettings, arbitrary_types_allowed=True): ######################## # Application Settings # ######################## + project_root: Path = PROJECT_ROOT + app_data_dir: Path = project_root / "data" skip_news: bool = False skip_reprompt: bool = False authorise_key: str = "y" @@ -40,20 +45,14 @@ class Config(SystemSettings, arbitrary_types_allowed=True): noninteractive_mode: bool = False chat_messages_enabled: bool = True # TTS configuration - speak_mode: bool = False - text_to_speech_provider: str = "gtts" - streamelements_voice: str = "Brian" - elevenlabs_voice_id: Optional[str] = None + tts_config: TTSConfig = TTSConfig() ########################## # Agent Control Settings # ########################## # Paths - ai_settings_file: str = AI_SETTINGS_FILE - prompt_settings_file: str = PROMPT_SETTINGS_FILE - workdir: Path = None - workspace_path: Optional[Path] = None - file_logger_path: Optional[Path] = None + ai_settings_file: Path = project_root / AI_SETTINGS_FILE + prompt_settings_file: Path = project_root / PROMPT_SETTINGS_FILE # Model configuration fast_llm: str = "gpt-3.5-turbo-16k" smart_llm: str = "gpt-4-0314" @@ -105,7 +104,7 @@ class Config(SystemSettings, arbitrary_types_allowed=True): # Plugin Settings # ################### plugins_dir: str = "plugins" - plugins_config_file: str = PLUGINS_CONFIG_FILE + plugins_config_file: Path = project_root / PLUGINS_CONFIG_FILE plugins_config: PluginsConfig = Field( default_factory=lambda: PluginsConfig(plugins={}) ) @@ -124,10 +123,8 @@ class Config(SystemSettings, arbitrary_types_allowed=True): openai_api_version: Optional[str] = None openai_organization: Optional[str] = None use_azure: bool = False - azure_config_file: Optional[str] = AZURE_CONFIG_FILE + azure_config_file: Optional[Path] = project_root / AZURE_CONFIG_FILE azure_model_to_deployment_id_map: Optional[Dict[str, str]] = None - # Elevenlabs - elevenlabs_api_key: Optional[str] = None # Github github_api_key: Optional[str] = None github_username: Optional[str] = None @@ -225,33 +222,34 @@ class ConfigBuilder(Configurable[Config]): default_settings = Config() @classmethod - def build_config_from_env(cls, workdir: Path) -> Config: + def build_config_from_env(cls, 
project_root: Path = PROJECT_ROOT) -> Config: """Initialize the Config class""" config_dict = { - "workdir": workdir, + "project_root": project_root, "authorise_key": os.getenv("AUTHORISE_COMMAND_KEY"), "exit_key": os.getenv("EXIT_KEY"), "plain_output": os.getenv("PLAIN_OUTPUT", "False") == "True", "shell_command_control": os.getenv("SHELL_COMMAND_CONTROL"), - "ai_settings_file": os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE), - "prompt_settings_file": os.getenv( - "PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE - ), + "ai_settings_file": project_root + / Path(os.getenv("AI_SETTINGS_FILE", AI_SETTINGS_FILE)), + "prompt_settings_file": project_root + / Path(os.getenv("PROMPT_SETTINGS_FILE", PROMPT_SETTINGS_FILE)), "fast_llm": os.getenv("FAST_LLM", os.getenv("FAST_LLM_MODEL")), "smart_llm": os.getenv("SMART_LLM", os.getenv("SMART_LLM_MODEL")), "embedding_model": os.getenv("EMBEDDING_MODEL"), "browse_spacy_language_model": os.getenv("BROWSE_SPACY_LANGUAGE_MODEL"), "openai_api_key": os.getenv("OPENAI_API_KEY"), "use_azure": os.getenv("USE_AZURE") == "True", - "azure_config_file": os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE), + "azure_config_file": project_root + / Path(os.getenv("AZURE_CONFIG_FILE", AZURE_CONFIG_FILE)), "execute_local_commands": os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True", "restrict_to_workspace": os.getenv("RESTRICT_TO_WORKSPACE", "True") == "True", "openai_functions": os.getenv("OPENAI_FUNCTIONS", "False") == "True", - "elevenlabs_api_key": os.getenv("ELEVENLABS_API_KEY"), - "streamelements_voice": os.getenv("STREAMELEMENTS_VOICE"), - "text_to_speech_provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"), + "tts_config": { + "provider": os.getenv("TEXT_TO_SPEECH_PROVIDER"), + }, "github_api_key": os.getenv("GITHUB_API_KEY"), "github_username": os.getenv("GITHUB_USERNAME"), "google_api_key": os.getenv("GOOGLE_API_KEY"), @@ -273,9 +271,8 @@ class ConfigBuilder(Configurable[Config]): "redis_password": os.getenv("REDIS_PASSWORD"), "wipe_redis_on_start": os.getenv("WIPE_REDIS_ON_START", "True") == "True", "plugins_dir": os.getenv("PLUGINS_DIR"), - "plugins_config_file": os.getenv( - "PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE - ), + "plugins_config_file": project_root + / Path(os.getenv("PLUGINS_CONFIG_FILE", PLUGINS_CONFIG_FILE)), "chat_messages_enabled": os.getenv("CHAT_MESSAGES_ENABLED") == "True", } @@ -294,19 +291,26 @@ class ConfigBuilder(Configurable[Config]): "GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID") ) - config_dict["elevenlabs_voice_id"] = os.getenv( - "ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID") - ) - if not config_dict["text_to_speech_provider"]: + if os.getenv("ELEVENLABS_API_KEY"): + config_dict["tts_config"]["elevenlabs"] = { + "api_key": os.getenv("ELEVENLABS_API_KEY"), + "voice_id": os.getenv("ELEVENLABS_VOICE_ID", ""), + } + if os.getenv("STREAMELEMENTS_VOICE"): + config_dict["tts_config"]["streamelements"] = { + "voice": os.getenv("STREAMELEMENTS_VOICE"), + } + + if not config_dict["tts_config"]["provider"]: if os.getenv("USE_MAC_OS_TTS"): default_tts_provider = "macos" - elif config_dict["elevenlabs_api_key"]: + elif "elevenlabs" in config_dict["tts_config"]: default_tts_provider = "elevenlabs" elif os.getenv("USE_BRIAN_TTS"): default_tts_provider = "streamelements" else: default_tts_provider = "gtts" - config_dict["text_to_speech_provider"] = default_tts_provider + config_dict["tts_config"]["provider"] = default_tts_provider config_dict["plugins_allowlist"] = _safe_split(os.getenv("ALLOWLISTED_PLUGINS")) 
config_dict["plugins_denylist"] = _safe_split(os.getenv("DENYLISTED_PLUGINS")) @@ -320,7 +324,7 @@ class ConfigBuilder(Configurable[Config]): if config_dict["use_azure"]: azure_config = cls.load_azure_config( - workdir / config_dict["azure_config_file"] + project_root / config_dict["azure_config_file"] ) config_dict.update(azure_config) @@ -340,7 +344,7 @@ class ConfigBuilder(Configurable[Config]): # Set secondary config variables (that depend on other config variables) config.plugins_config = PluginsConfig.load_config( - config.workdir / config.plugins_config_file, + config.plugins_config_file, config.plugins_denylist, config.plugins_allowlist, ) @@ -374,7 +378,7 @@ class ConfigBuilder(Configurable[Config]): } -def check_openai_api_key(config: Config) -> None: +def assert_config_has_openai_api_key(config: Config) -> None: """Check if the OpenAI API key is set in config.py or as an environment variable.""" if not config.openai_api_key: print( diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py index 1cc2147c..37a672ea 100644 --- a/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/openai.py @@ -3,7 +3,7 @@ import functools import logging import math import time -from typing import Callable, ParamSpec, TypeVar +from typing import Callable, Optional, ParamSpec, TypeVar import openai import tiktoken @@ -16,6 +16,7 @@ from autogpt.core.configuration import ( ) from autogpt.core.resource.model_providers.schema import ( AssistantChatMessageDict, + AssistantFunctionCallDict, ChatMessage, ChatModelInfo, ChatModelProvider, @@ -33,6 +34,7 @@ from autogpt.core.resource.model_providers.schema import ( ModelProviderUsage, ModelTokenizer, ) +from autogpt.core.utils.json_schema import JSONSchema _T = TypeVar("_T") _P = ParamSpec("_P") @@ -263,11 +265,17 @@ class OpenAIProvider( model_prompt: list[ChatMessage], model_name: OpenAIModelName, completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None, - functions: list[CompletionModelFunction] = [], + functions: Optional[list[CompletionModelFunction]] = None, **kwargs, ) -> ChatModelResponse[_T]: """Create a completion using the OpenAI API.""" + completion_kwargs = self._get_completion_kwargs(model_name, functions, **kwargs) + functions_compat_mode = functions and "functions" not in completion_kwargs + if "messages" in completion_kwargs: + model_prompt += completion_kwargs["messages"] + del completion_kwargs["messages"] + response = await self._create_chat_completion( messages=model_prompt, **completion_kwargs, @@ -279,6 +287,10 @@ class OpenAIProvider( } response_message = response.choices[0].message.to_dict_recursive() + if functions_compat_mode: + response_message["function_call"] = _functions_compat_extract_call( + response_message["content"] + ) response = ChatModelResponse( response=response_message, parsed_result=completion_parser(response_message), @@ -313,7 +325,7 @@ class OpenAIProvider( def _get_completion_kwargs( self, model_name: OpenAIModelName, - functions: list[CompletionModelFunction], + functions: Optional[list[CompletionModelFunction]] = None, **kwargs, ) -> dict: """Get kwargs for completion API call. 
@@ -331,8 +343,13 @@ class OpenAIProvider(
             **kwargs,
             **self._credentials.unmasked(),
         }
+
         if functions:
-            completion_kwargs["functions"] = [f.schema for f in functions]
+            if OPEN_AI_CHAT_MODELS[model_name].has_function_call_api:
+                completion_kwargs["functions"] = [f.schema for f in functions]
+            else:
+                # Provide compatibility with older models
+                _functions_compat_fix_kwargs(functions, completion_kwargs)
 
         return completion_kwargs
 
@@ -459,3 +476,129 @@ class _OpenAIRetryHandler:
                 self._backoff(attempt)
 
         return _wrapped
+
+
+def format_function_specs_as_typescript_ns(
+    functions: list[CompletionModelFunction],
+) -> str:
+    """Returns a function signature block in the format used by OpenAI internally:
+    https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18
+
+    For use with `count_tokens` to determine token usage of provided functions.
+
+    Example:
+    ```ts
+    namespace functions {
+
+    // Get the current weather in a given location
+    type get_current_weather = (_: {
+    // The city and state, e.g. San Francisco, CA
+    location: string,
+    unit?: "celsius" | "fahrenheit",
+    }) => any;
+
+    } // namespace functions
+    ```
+    """
+
+    return (
+        "namespace functions {\n\n"
+        + "\n\n".join(format_openai_function_for_prompt(f) for f in functions)
+        + "\n\n} // namespace functions"
+    )
+
+
+def format_openai_function_for_prompt(func: CompletionModelFunction) -> str:
+    """Returns the function formatted similarly to the way OpenAI does it internally:
+    https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18
+
+    Example:
+    ```ts
+    // Get the current weather in a given location
+    type get_current_weather = (_: {
+    // The city and state, e.g. San Francisco, CA
+    location: string,
+    unit?: "celsius" | "fahrenheit",
+    }) => any;
+    ```
+    """
+
+    def param_signature(name: str, spec: JSONSchema) -> str:
+        return (
+            f"// {spec.description}\n" if spec.description else ""
+        ) + f"{name}{'' if spec.required else '?'}: {spec.typescript_type},"
+
+    return "\n".join(
+        [
+            f"// {func.description}",
+            f"type {func.name} = (_: {{",
+            *[param_signature(name, p) for name, p in func.parameters.items()],
+            "}) => any;",
+        ]
+    )
+
+
+def count_openai_functions_tokens(
+    functions: list[CompletionModelFunction], count_tokens: Callable[[str], int]
+) -> int:
+    """Returns the number of tokens taken up by a set of function definitions
+
+    Reference: https://community.openai.com/t/how-to-calculate-the-tokens-when-using-function-call/266573/18
+    """
+    return count_tokens(
+        f"# Tools\n\n## functions\n\n{format_function_specs_as_typescript_ns(functions)}"
+    )
+
+
+def _functions_compat_fix_kwargs(
+    functions: list[CompletionModelFunction],
+    completion_kwargs: dict,
+):
+    function_definitions = format_function_specs_as_typescript_ns(functions)
+    function_call_schema = JSONSchema(
+        type=JSONSchema.Type.OBJECT,
+        properties={
+            "name": JSONSchema(
+                description="The name of the function to call",
+                enum=[f.name for f in functions],
+                required=True,
+            ),
+            "arguments": JSONSchema(
+                description="The arguments for the function call",
+                type=JSONSchema.Type.OBJECT,
+                required=True,
+            ),
+        },
+    )
+    completion_kwargs["messages"] = [
+        ChatMessage.system(
+            "# function_call instructions\n\n"
+            "Specify a '```function_call' block in your response,"
+            " enclosing a function call in the form of a valid JSON object"
+            " that adheres to the following schema:\n\n"
+            f"{function_call_schema.to_dict()}\n\n"
+            "Put the function_call block at the end of your response"
+            " and 
include its fences if it is not the only content.\n\n" + "## functions\n\n" + "For the function call itself, use one of the following" + f" functions:\n\n{function_definitions}" + ), + ] + + +def _functions_compat_extract_call(response: str) -> AssistantFunctionCallDict: + import json + import re + + logging.debug(f"Trying to extract function call from response:\n{response}") + + if response[0] == "{": + function_call = json.loads(response) + else: + block = re.search(r"```(?:function_call)?\n(.*)\n```\s*$", response, re.DOTALL) + if not block: + raise ValueError("Could not find function call block in response") + function_call = json.loads(block.group(1)) + + function_call["arguments"] = str(function_call["arguments"]) # HACK + return function_call diff --git a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py index 4989afd5..14e5618c 100644 --- a/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py +++ b/autogpts/autogpt/autogpt/core/resource/model_providers/schema.py @@ -333,7 +333,7 @@ class ChatModelProvider(ModelProvider): model_prompt: list[ChatMessage], model_name: str, completion_parser: Callable[[AssistantChatMessageDict], _T] = lambda _: None, - functions: list[CompletionModelFunction] = [], + functions: Optional[list[CompletionModelFunction]] = None, **kwargs, ) -> ChatModelResponse[_T]: ... diff --git a/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py b/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py index 36d9040c..d1cbc2fd 100644 --- a/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py +++ b/autogpts/autogpt/autogpt/core/runner/cli_web_app/server/api.py @@ -6,14 +6,10 @@ from agent_protocol import StepHandler, StepResult from autogpt.agents import Agent from autogpt.app.main import UserFeedback from autogpt.commands import COMMAND_CATEGORIES -from autogpt.config import AIConfig, ConfigBuilder +from autogpt.config import AIProfile, ConfigBuilder from autogpt.logs.helpers import user_friendly_output -from autogpt.memory.vector import get_memory from autogpt.models.command_registry import CommandRegistry from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT -from autogpt.workspace import Workspace - -PROJECT_DIR = Path().resolve() async def task_handler(task_input) -> StepHandler: @@ -69,11 +65,11 @@ async def interaction_step( ) return - next_command_name, next_command_args, assistant_reply_dict = agent.think() + next_command_name, next_command_args, assistant_reply_dict = agent.propose_action() return { "config": agent.config, - "ai_config": agent.ai_config, + "ai_profile": agent.ai_profile, "result": result, "assistant_reply_dict": assistant_reply_dict, "next_step_command_name": next_command_name, @@ -82,25 +78,21 @@ async def interaction_step( def bootstrap_agent(task, continuous_mode) -> Agent: - config = ConfigBuilder.build_config_from_env(workdir=PROJECT_DIR) + config = ConfigBuilder.build_config_from_env() config.debug_mode = True config.continuous_mode = continuous_mode config.temperature = 0 config.plain_output = True command_registry = CommandRegistry.with_command_modules(COMMAND_CATEGORIES, config) config.memory_backend = "no_memory" - config.workspace_path = Workspace.init_workspace_directory(config) - config.file_logger_path = Workspace.build_file_logger_path(config.workspace_path) - ai_config = AIConfig( + ai_profile = AIProfile( ai_name="AutoGPT", ai_role="a multi-purpose AI assistant.", ai_goals=[task], ) - 
ai_config.command_registry = command_registry return Agent( - memory=get_memory(config), command_registry=command_registry, - ai_config=ai_config, + ai_profile=ai_profile, config=config, triggering_prompt=DEFAULT_TRIGGERING_PROMPT, ) diff --git a/autogpts/autogpt/autogpt/core/runner/client_lib/utils.py b/autogpts/autogpt/autogpt/core/runner/client_lib/utils.py index 39b5135f..ebb03edb 100644 --- a/autogpts/autogpt/autogpt/core/runner/client_lib/utils.py +++ b/autogpts/autogpt/autogpt/core/runner/client_lib/utils.py @@ -1,7 +1,7 @@ import asyncio import functools from bdb import BdbQuit -from typing import Callable, ParamSpec, TypeVar +from typing import Any, Callable, Coroutine, ParamSpec, TypeVar import click @@ -53,9 +53,9 @@ def handle_exceptions( return wrapped -def coroutine(f): +def coroutine(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]: @functools.wraps(f) - def wrapper(*args, **kwargs): + def wrapper(*args: P.args, **kwargs: P.kwargs): return asyncio.run(f(*args, **kwargs)) return wrapper diff --git a/autogpts/autogpt/autogpt/file_workspace/__init__.py b/autogpts/autogpt/autogpt/file_workspace/__init__.py new file mode 100644 index 00000000..76a26eef --- /dev/null +++ b/autogpts/autogpt/autogpt/file_workspace/__init__.py @@ -0,0 +1,5 @@ +from .file_workspace import FileWorkspace + +__all__ = [ + "FileWorkspace", +] diff --git a/autogpts/autogpt/autogpt/file_workspace/file_workspace.py b/autogpts/autogpt/autogpt/file_workspace/file_workspace.py new file mode 100644 index 00000000..37da359e --- /dev/null +++ b/autogpts/autogpt/autogpt/file_workspace/file_workspace.py @@ -0,0 +1,145 @@ +""" +The FileWorkspace class provides an interface for interacting with a file workspace. +""" +from __future__ import annotations + +import logging +from pathlib import Path +from typing import Any, Callable, Optional + +logger = logging.getLogger(__name__) + + +class FileWorkspace: + """A class that represents a file workspace.""" + + NULL_BYTES = ["\0", "\000", "\x00", "\u0000"] + + on_write_file: Callable[[Path], Any] | None = None + """ + Event hook, executed after writing a file. + + Params: + Path: The path of the file that was written, relative to the workspace root. + """ + + def __init__(self, root: str | Path, restrict_to_root: bool): + self._root = self._sanitize_path(root) + self._restrict_to_root = restrict_to_root + + @property + def root(self) -> Path: + """The root directory of the file workspace.""" + return self._root + + @property + def restrict_to_root(self): + """Whether to restrict generated paths to the root.""" + return self._restrict_to_root + + def initialize(self) -> None: + self.root.mkdir(exist_ok=True, parents=True) + + def get_path(self, relative_path: str | Path) -> Path: + """Get the full path for an item in the workspace. + + Parameters: + relative_path: The relative path to resolve in the workspace. + + Returns: + Path: The resolved path relative to the workspace. 
+ """ + return self._sanitize_path( + relative_path, + root=self.root, + restrict_to_root=self.restrict_to_root, + ) + + def open_file(self, path: str | Path, mode: str = "r"): + """Open a file in the workspace.""" + full_path = self.get_path(path) + return open(full_path, mode) + + def read_file(self, path: str | Path, binary: bool = False): + """Read a file in the workspace.""" + with self.open_file(path, "rb" if binary else "r") as file: + return file.read() + + def write_file(self, path: str | Path, content: str | bytes): + """Write to a file in the workspace.""" + with self.open_file(path, "wb" if type(content) is bytes else "w") as file: + file.write(content) + + if self.on_write_file: + path = Path(path) + if path.is_absolute(): + path = path.relative_to(self.root) + self.on_write_file(path) + + def list_files(self, path: str | Path = "."): + """List all files in a directory in the workspace.""" + full_path = self.get_path(path) + return [str(file) for file in full_path.glob("*") if file.is_file()] + + def delete_file(self, path: str | Path): + """Delete a file in the workspace.""" + full_path = self.get_path(path) + full_path.unlink() + + @staticmethod + def _sanitize_path( + relative_path: str | Path, + root: Optional[str | Path] = None, + restrict_to_root: bool = True, + ) -> Path: + """Resolve the relative path within the given root if possible. + + Parameters: + relative_path: The relative path to resolve. + root: The root path to resolve the relative path within. + restrict_to_root: Whether to restrict the path to the root. + + Returns: + Path: The resolved path. + + Raises: + ValueError: If the path is absolute and a root is provided. + ValueError: If the path is outside the root and the root is restricted. + """ + + # Posix systems disallow null bytes in paths. Windows is agnostic about it. + # Do an explicit check here for all sorts of null byte representations. + + for null_byte in FileWorkspace.NULL_BYTES: + if null_byte in str(relative_path) or null_byte in str(root): + raise ValueError("embedded null byte") + + if root is None: + return Path(relative_path).resolve() + + logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'") + + root, relative_path = Path(root).resolve(), Path(relative_path) + + logger.debug(f"Resolved root as '{root}'") + + # Allow absolute paths if they are contained in the workspace. + if ( + relative_path.is_absolute() + and restrict_to_root + and not relative_path.is_relative_to(root) + ): + raise ValueError( + f"Attempted to access absolute path '{relative_path}' in workspace '{root}'." + ) + + full_path = root.joinpath(relative_path).resolve() + + logger.debug(f"Joined paths as '{full_path}'") + + if restrict_to_root and not full_path.is_relative_to(root): + raise ValueError( + f"Attempted to access path '{full_path}' outside of workspace '{root}'." 
+ ) + + return full_path diff --git a/autogpts/autogpt/autogpt/logs/config.py b/autogpts/autogpt/autogpt/logs/config.py index b342a33d..7b8a043b 100644 --- a/autogpts/autogpt/autogpt/logs/config.py +++ b/autogpts/autogpt/autogpt/logs/config.py @@ -4,13 +4,14 @@ from __future__ import annotations import logging import sys from pathlib import Path -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional from auto_gpt_plugin_template import AutoGPTPluginTemplate from openai.util import logger as openai_logger if TYPE_CHECKING: from autogpt.config import Config + from autogpt.speech import TTSConfig from autogpt.core.runner.client_lib.logging import BelowLevelFilter @@ -33,15 +34,20 @@ USER_FRIENDLY_OUTPUT_LOGGER = "USER_FRIENDLY_OUTPUT" _chat_plugins: list[AutoGPTPluginTemplate] = [] -def configure_logging(config: Config, log_dir: Path = LOG_DIR) -> None: +def configure_logging( + debug_mode: bool = False, + plain_output: bool = False, + tts_config: Optional[TTSConfig] = None, + log_dir: Path = LOG_DIR, +) -> None: """Configure the native logging module.""" # create log directory if it doesn't exist if not log_dir.exists(): log_dir.mkdir() - log_level = logging.DEBUG if config.debug_mode else logging.INFO - log_format = DEBUG_LOG_FORMAT if config.debug_mode else SIMPLE_LOG_FORMAT + log_level = logging.DEBUG if debug_mode else logging.INFO + log_format = DEBUG_LOG_FORMAT if debug_mode else SIMPLE_LOG_FORMAT console_formatter = AutoGptFormatter(log_format) # Console output handlers @@ -60,7 +66,7 @@ def configure_logging(config: Config, log_dir: Path = LOG_DIR) -> None: AutoGptFormatter(SIMPLE_LOG_FORMAT, no_color=True) ) - if config.debug_mode: + if debug_mode: # DEBUG log file handler debug_log_handler = logging.FileHandler(log_dir / DEBUG_LOG_FILE, "a", "utf-8") debug_log_handler.setLevel(logging.DEBUG) @@ -79,7 +85,7 @@ def configure_logging(config: Config, log_dir: Path = LOG_DIR) -> None: level=log_level, handlers=( [stdout, stderr, activity_log_handler, error_log_handler] - + ([debug_log_handler] if config.debug_mode else []) + + ([debug_log_handler] if debug_mode else []) ), ) @@ -93,9 +99,10 @@ def configure_logging(config: Config, log_dir: Path = LOG_DIR) -> None: user_friendly_output_logger = logging.getLogger(USER_FRIENDLY_OUTPUT_LOGGER) user_friendly_output_logger.setLevel(logging.INFO) user_friendly_output_logger.addHandler( - typing_console_handler if not config.plain_output else stdout + typing_console_handler if not plain_output else stdout ) - user_friendly_output_logger.addHandler(TTSHandler(config)) + if tts_config: + user_friendly_output_logger.addHandler(TTSHandler(tts_config)) user_friendly_output_logger.addHandler(activity_log_handler) user_friendly_output_logger.addHandler(error_log_handler) user_friendly_output_logger.addHandler(stderr) @@ -103,7 +110,8 @@ def configure_logging(config: Config, log_dir: Path = LOG_DIR) -> None: speech_output_logger = logging.getLogger(SPEECH_OUTPUT_LOGGER) speech_output_logger.setLevel(logging.INFO) - speech_output_logger.addHandler(TTSHandler(config)) + if tts_config: + speech_output_logger.addHandler(TTSHandler(tts_config)) speech_output_logger.propagate = False # JSON logger with better formatting diff --git a/autogpts/autogpt/autogpt/logs/handlers.py b/autogpts/autogpt/autogpt/logs/handlers.py index 4896d99a..6d371059 100644 --- a/autogpts/autogpt/autogpt/logs/handlers.py +++ b/autogpts/autogpt/autogpt/logs/handlers.py @@ -11,7 +11,7 @@ from autogpt.logs.utils import remove_color_codes from autogpt.speech import 
TextToSpeechProvider if TYPE_CHECKING: - from autogpt.config import Config + from autogpt.speech import TTSConfig class TypingConsoleHandler(logging.StreamHandler): @@ -50,7 +50,7 @@ class TypingConsoleHandler(logging.StreamHandler): class TTSHandler(logging.Handler): """Output messages to the configured TTS engine (if any)""" - def __init__(self, config: Config): + def __init__(self, config: TTSConfig): super().__init__() self.config = config self.tts_provider = TextToSpeechProvider(config) diff --git a/autogpts/autogpt/autogpt/models/action_history.py b/autogpts/autogpt/autogpt/models/action_history.py index 5fc52db0..fc19cf12 100644 --- a/autogpts/autogpt/autogpt/models/action_history.py +++ b/autogpts/autogpt/autogpt/models/action_history.py @@ -2,7 +2,7 @@ from __future__ import annotations from typing import Any, Iterator, Literal, Optional -from pydantic import BaseModel +from pydantic import BaseModel, Field from autogpt.prompts.utils import format_numbered_list, indent @@ -60,14 +60,8 @@ class Episode(BaseModel): class EpisodicActionHistory(BaseModel): """Utility container for an action history""" - cursor: int - episodes: list[Episode] - - def __init__(self, episodes: list[Episode] = []): - super().__init__( - episodes=episodes, - cursor=len(episodes), - ) + episodes: list[Episode] = Field(default_factory=list) + cursor: int = 0 @property def current_episode(self) -> Episode | None: diff --git a/autogpts/autogpt/autogpt/speech/__init__.py b/autogpts/autogpt/autogpt/speech/__init__.py index 1b419eb1..d5f0f2e0 100644 --- a/autogpts/autogpt/autogpt/speech/__init__.py +++ b/autogpts/autogpt/autogpt/speech/__init__.py @@ -1,4 +1,4 @@ """This module contains the speech recognition and speech synthesis functions.""" -from autogpt.speech.say import TextToSpeechProvider +from autogpt.speech.say import TextToSpeechProvider, TTSConfig -__all__ = ["TextToSpeechProvider"] +__all__ = ["TextToSpeechProvider", "TTSConfig"] diff --git a/autogpts/autogpt/autogpt/speech/base.py b/autogpts/autogpt/autogpt/speech/base.py index 29b17b02..b9bcd040 100644 --- a/autogpts/autogpt/autogpt/speech/base.py +++ b/autogpts/autogpt/autogpt/speech/base.py @@ -4,10 +4,6 @@ from __future__ import annotations import abc import re from threading import Lock -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from autogpt.config import Config class VoiceBase: @@ -15,7 +11,7 @@ class VoiceBase: Base class for all voice classes. """ - def __init__(self, config: Config): + def __init__(self, *args, **kwargs): """ Initialize the voice class. """ @@ -24,7 +20,7 @@ class VoiceBase: self._api_key = None self._voices = [] self._mutex = Lock() - self._setup(config) + self._setup(*args, **kwargs) def say(self, text: str, voice_index: int = 0) -> bool: """ @@ -43,7 +39,7 @@ class VoiceBase: return self._speech(text, voice_index) @abc.abstractmethod - def _setup(self, config: Config) -> None: + def _setup(self, *args, **kwargs) -> None: """ Setup the voices, API key, etc. 
""" diff --git a/autogpts/autogpt/autogpt/speech/eleven_labs.py b/autogpts/autogpt/autogpt/speech/eleven_labs.py index 7fbd40bd..c7929222 100644 --- a/autogpts/autogpt/autogpt/speech/eleven_labs.py +++ b/autogpts/autogpt/autogpt/speech/eleven_labs.py @@ -3,13 +3,12 @@ from __future__ import annotations import logging import os -from typing import TYPE_CHECKING import requests from playsound import playsound -if TYPE_CHECKING: - from autogpt.config import Config +from autogpt.core.configuration import SystemConfiguration, UserConfigurable + from .base import VoiceBase logger = logging.getLogger(__name__) @@ -17,10 +16,15 @@ logger = logging.getLogger(__name__) PLACEHOLDERS = {"your-voice-id"} +class ElevenLabsConfig(SystemConfiguration): + api_key: str = UserConfigurable() + voice_id: str = UserConfigurable() + + class ElevenLabsSpeech(VoiceBase): """ElevenLabs speech class""" - def _setup(self, config: Config) -> None: + def _setup(self, config: ElevenLabsConfig) -> None: """Set up the voices, API key, etc. Returns: @@ -41,12 +45,12 @@ class ElevenLabsSpeech(VoiceBase): } self._headers = { "Content-Type": "application/json", - "xi-api-key": config.elevenlabs_api_key, + "xi-api-key": config.api_key, } self._voices = default_voices.copy() - if config.elevenlabs_voice_id in voice_options: - config.elevenlabs_voice_id = voice_options[config.elevenlabs_voice_id] - self._use_custom_voice(config.elevenlabs_voice_id, 0) + if config.voice_id in voice_options: + config.voice_id = voice_options[config.voice_id] + self._use_custom_voice(config.voice_id, 0) def _use_custom_voice(self, voice, voice_index) -> None: """Use a custom voice if provided and not a placeholder diff --git a/autogpts/autogpt/autogpt/speech/gtts.py b/autogpts/autogpt/autogpt/speech/gtts.py index 105ef29c..40f7bcb9 100644 --- a/autogpts/autogpt/autogpt/speech/gtts.py +++ b/autogpts/autogpt/autogpt/speech/gtts.py @@ -2,21 +2,17 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING import gtts from playsound import playsound -if TYPE_CHECKING: - from autogpt.config import Config - from autogpt.speech.base import VoiceBase class GTTSVoice(VoiceBase): """GTTS Voice.""" - def _setup(self, config: Config) -> None: + def _setup(self) -> None: pass def _speech(self, text: str, _: int = 0) -> bool: diff --git a/autogpts/autogpt/autogpt/speech/macos_tts.py b/autogpts/autogpt/autogpt/speech/macos_tts.py index 01facc1d..e88331d2 100644 --- a/autogpts/autogpt/autogpt/speech/macos_tts.py +++ b/autogpts/autogpt/autogpt/speech/macos_tts.py @@ -2,10 +2,6 @@ from __future__ import annotations import os -from typing import TYPE_CHECKING - -if TYPE_CHECKING: - from autogpt.config import Config from autogpt.speech.base import VoiceBase @@ -13,7 +9,7 @@ from autogpt.speech.base import VoiceBase class MacOSTTS(VoiceBase): """MacOS TTS Voice.""" - def _setup(self, config: Config) -> None: + def _setup(self) -> None: pass def _speech(self, text: str, voice_index: int = 0) -> bool: diff --git a/autogpts/autogpt/autogpt/speech/say.py b/autogpts/autogpt/autogpt/speech/say.py index 301fda49..be4ad5d8 100644 --- a/autogpts/autogpt/autogpt/speech/say.py +++ b/autogpts/autogpt/autogpt/speech/say.py @@ -3,24 +3,32 @@ from __future__ import annotations import threading from threading import Semaphore -from typing import TYPE_CHECKING +from typing import Literal, Optional -if TYPE_CHECKING: - from autogpt.config import Config +from autogpt.core.configuration.schema import SystemConfiguration, UserConfigurable from .base import VoiceBase 
-from .eleven_labs import ElevenLabsSpeech +from .eleven_labs import ElevenLabsConfig, ElevenLabsSpeech from .gtts import GTTSVoice from .macos_tts import MacOSTTS -from .stream_elements_speech import StreamElementsSpeech +from .stream_elements_speech import StreamElementsConfig, StreamElementsSpeech _QUEUE_SEMAPHORE = Semaphore( 1 ) # The amount of sounds to queue before blocking the main thread +class TTSConfig(SystemConfiguration): + speak_mode: bool = False + provider: Literal[ + "elevenlabs", "gtts", "macos", "streamelements" + ] = UserConfigurable(default="gtts") + elevenlabs: Optional[ElevenLabsConfig] = None + streamelements: Optional[StreamElementsConfig] = None + + class TextToSpeechProvider: - def __init__(self, config: Config): + def __init__(self, config: TTSConfig): self._config = config self._default_voice_engine, self._voice_engine = self._get_voice_engine(config) @@ -37,19 +45,19 @@ class TextToSpeechProvider: thread.start() def __repr__(self): - return f"{self.__class__.__name__}(enabled={self._config.speak_mode}, provider={self._voice_engine.__class__.__name__})" + return f"{self.__class__.__name__}(provider={self._voice_engine.__class__.__name__})" @staticmethod - def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]: + def _get_voice_engine(config: TTSConfig) -> tuple[VoiceBase, VoiceBase]: """Get the voice engine to use for the given configuration""" - tts_provider = config.text_to_speech_provider + tts_provider = config.provider if tts_provider == "elevenlabs": - voice_engine = ElevenLabsSpeech(config) + voice_engine = ElevenLabsSpeech(config.elevenlabs) elif tts_provider == "macos": - voice_engine = MacOSTTS(config) + voice_engine = MacOSTTS() elif tts_provider == "streamelements": - voice_engine = StreamElementsSpeech(config) + voice_engine = StreamElementsSpeech(config.streamelements) else: - voice_engine = GTTSVoice(config) + voice_engine = GTTSVoice() - return GTTSVoice(config), voice_engine + return GTTSVoice(), voice_engine diff --git a/autogpts/autogpt/autogpt/speech/stream_elements_speech.py b/autogpts/autogpt/autogpt/speech/stream_elements_speech.py index 1be69270..99bc43bf 100644 --- a/autogpts/autogpt/autogpt/speech/stream_elements_speech.py +++ b/autogpts/autogpt/autogpt/speech/stream_elements_speech.py @@ -2,28 +2,29 @@ from __future__ import annotations import logging import os -from typing import TYPE_CHECKING import requests from playsound import playsound -if TYPE_CHECKING: - from autogpt.config import Config - +from autogpt.core.configuration import SystemConfiguration, UserConfigurable from autogpt.speech.base import VoiceBase logger = logging.getLogger(__name__) +class StreamElementsConfig(SystemConfiguration): + voice: str = UserConfigurable(default="Brian") + + class StreamElementsSpeech(VoiceBase): """Streamelements speech module for autogpt""" - def _setup(self, config: Config) -> None: + def _setup(self, config: StreamElementsConfig) -> None: """Setup the voices, API key, etc.""" self.config = config def _speech(self, text: str, voice: str, _: int = 0) -> bool: - voice = self.config.streamelements_voice + voice = self.config.voice """Speak text using the streamelements API Args: diff --git a/autogpts/autogpt/autogpt/utils.py b/autogpts/autogpt/autogpt/utils.py index f69fe50f..4aa503a7 100644 --- a/autogpts/autogpt/autogpt/utils.py +++ b/autogpts/autogpt/autogpt/utils.py @@ -1,8 +1,10 @@ +from pathlib import Path + import yaml from colorama import Fore -def validate_yaml_file(file: str): +def validate_yaml_file(file: str | 
Path): try: with open(file, encoding="utf-8") as fp: yaml.load(fp.read(), Loader=yaml.FullLoader) diff --git a/autogpts/autogpt/autogpt/workspace/__init__.py b/autogpts/autogpt/autogpt/workspace/__init__.py deleted file mode 100644 index b348144b..00000000 --- a/autogpts/autogpt/autogpt/workspace/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from autogpt.workspace.workspace import Workspace - -__all__ = [ - "Workspace", -] diff --git a/autogpts/autogpt/autogpt/workspace/workspace.py b/autogpts/autogpt/autogpt/workspace/workspace.py deleted file mode 100644 index 125e6740..00000000 --- a/autogpts/autogpt/autogpt/workspace/workspace.py +++ /dev/null @@ -1,169 +0,0 @@ -""" -========= -Workspace -========= - -The workspace is a directory containing configuration and working files for an AutoGPT -agent. - -""" -from __future__ import annotations - -import logging -from pathlib import Path -from typing import Optional - -from autogpt.config import Config - -logger = logging.getLogger(__name__) - - -class Workspace: - """A class that represents a workspace for an AutoGPT agent.""" - - NULL_BYTES = ["\0", "\000", "\x00", "\u0000"] - - def __init__(self, workspace_root: str | Path, restrict_to_workspace: bool): - self._root = self._sanitize_path(workspace_root) - self._restrict_to_workspace = restrict_to_workspace - - @property - def root(self) -> Path: - """The root directory of the workspace.""" - return self._root - - @property - def restrict_to_workspace(self): - """Whether to restrict generated paths to the workspace.""" - return self._restrict_to_workspace - - @classmethod - def make_workspace(cls, workspace_directory: str | Path, *args, **kwargs) -> Path: - """Create a workspace directory and return the path to it. - - Parameters - ---------- - workspace_directory - The path to the workspace directory. - - Returns - ------- - Path - The path to the workspace directory. - - """ - # TODO: have this make the env file and ai settings file in the directory. - workspace_directory = cls._sanitize_path(workspace_directory) - workspace_directory.mkdir(exist_ok=True, parents=True) - return workspace_directory - - def get_path(self, relative_path: str | Path) -> Path: - """Get the full path for an item in the workspace. - - Parameters - ---------- - relative_path - The relative path to resolve in the workspace. - - Returns - ------- - Path - The resolved path relative to the workspace. - - """ - return self._sanitize_path( - relative_path, - root=self.root, - restrict_to_root=self.restrict_to_workspace, - ) - - @staticmethod - def _sanitize_path( - relative_path: str | Path, - root: Optional[str | Path] = None, - restrict_to_root: bool = True, - ) -> Path: - """Resolve the relative path within the given root if possible. - - Parameters - ---------- - relative_path - The relative path to resolve. - root - The root path to resolve the relative path within. - restrict_to_root - Whether to restrict the path to the root. - - Returns - ------- - Path - The resolved path. - - Raises - ------ - ValueError - If the path is absolute and a root is provided. - ValueError - If the path is outside the root and the root is restricted. - - """ - - # Posix systems disallow null bytes in paths. Windows is agnostic about it. - # Do an explicit check here for all sorts of null byte representations. 
- - for null_byte in Workspace.NULL_BYTES: - if null_byte in str(relative_path) or null_byte in str(root): - raise ValueError("embedded null byte") - - if root is None: - return Path(relative_path).resolve() - - logger.debug(f"Resolving path '{relative_path}' in workspace '{root}'") - - root, relative_path = Path(root).resolve(), Path(relative_path) - - logger.debug(f"Resolved root as '{root}'") - - # Allow exception for absolute paths if they are contained in your workspace directory. - if ( - relative_path.is_absolute() - and restrict_to_root - and not relative_path.is_relative_to(root) - ): - raise ValueError( - f"Attempted to access absolute path '{relative_path}' in workspace '{root}'." - ) - - full_path = root.joinpath(relative_path).resolve() - - logger.debug(f"Joined paths as '{full_path}'") - - if restrict_to_root and not full_path.is_relative_to(root): - raise ValueError( - f"Attempted to access path '{full_path}' outside of workspace '{root}'." - ) - - return full_path - - @staticmethod - def build_file_logger_path(workspace_directory: Path) -> Path: - file_logger_path = workspace_directory / "file_logger.log" - if not file_logger_path.exists(): - with file_logger_path.open(mode="w", encoding="utf-8") as f: - f.write("File Operation Logger ") - return file_logger_path - - @staticmethod - def init_workspace_directory( - config: Config, override_workspace_path: Optional[str | Path] = None - ) -> Path: - if override_workspace_path is None: - workspace_path = config.workdir / "auto_gpt_workspace" - elif type(override_workspace_path) == str: - workspace_path = Path(override_workspace_path) - else: - workspace_path = override_workspace_path - - # TODO: pass in the ai_settings file and the env file and have them cloned into - # the workspace directory so we can bind them to the agent. - return Workspace.make_workspace(workspace_path) diff --git a/autogpts/autogpt/poetry.lock b/autogpts/autogpt/poetry.lock index a8c65ffe..dbd7624f 100644 --- a/autogpts/autogpt/poetry.lock +++ b/autogpts/autogpt/poetry.lock @@ -15,7 +15,7 @@ files = [ name = "agbenchmark" version = "0.0.10" description = "Benchmarking the performance of agents far and wide, regardless of how they are set up and how they work" -optional = false +optional = true python-versions = "^3.10" files = [] develop = false @@ -203,11 +203,22 @@ doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)"] test = ["anyio[trio]", "coverage[toml] (>=7)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] trio = ["trio (>=0.22)"] +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." 
+optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + [[package]] name = "appnope" version = "0.1.3" description = "Disable App Nap on macOS >= 10.9" -optional = false +optional = true python-versions = "*" files = [ {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, @@ -218,7 +229,7 @@ files = [ name = "asttokens" version = "2.4.0" description = "Annotate AST trees with source code positions" -optional = false +optional = true python-versions = "*" files = [ {file = "asttokens-2.4.0-py2.py3-none-any.whl", hash = "sha256:cf8fc9e61a86461aa9fb161a14a0841a03c405fa829ac6b202670b3495d2ce69"}, @@ -304,17 +315,95 @@ files = [ pyflakes = ">=3.0.0" tomli = {version = ">=2.0.1", markers = "python_version < \"3.11\""} +[[package]] +name = "autogpt-forge" +version = "0.1.0" +description = "" +optional = false +python-versions = "^3.10" +files = [] +develop = false + +[package.dependencies] +aiohttp = "^3.8.5" +bs4 = "^0.0.1" +chromadb = "^0.4.10" +colorlog = "^6.7.0" +duckduckgo-search = "^3.8.0" +jinja2 = "^3.1.2" +litellm = "^0.1.821" +openai = "^0.27.8" +python-dotenv = "^1.0.0" +python-multipart = "^0.0.6" +selenium = "^4.13.0" +sqlalchemy = "^2.0.19" +tenacity = "^8.2.2" +toml = "^0.10.2" +uvicorn = "^0.23.2" + +[package.source] +type = "git" +url = "https://github.com/Significant-Gravitas/AutoGPT.git" +reference = "10aecec" +resolved_reference = "10aececc6a732878dd037e2deb2f6a4962e19e20" +subdirectory = "autogpts/forge" + [[package]] name = "backcall" version = "0.2.0" description = "Specifications for callback functions passed in to an API" -optional = false +optional = true python-versions = "*" files = [ {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, ] +[[package]] +name = "backoff" +version = "2.2.1" +description = "Function decoration for backoff and retry" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, + {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, +] + +[[package]] +name = "bcrypt" +version = "4.0.1" +description = "Modern password hashing for your software and your servers" +optional = false +python-versions = ">=3.6" +files = [ + {file = "bcrypt-4.0.1-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:b1023030aec778185a6c16cf70f359cbb6e0c289fd564a7cfa29e727a1c38f8f"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:08d2947c490093a11416df18043c27abe3921558d2c03e2076ccb28a116cb6d0"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0eaa47d4661c326bfc9d08d16debbc4edf78778e6aaba29c1bc7ce67214d4410"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae88eca3024bb34bb3430f964beab71226e761f51b912de5133470b649d82344"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_24_x86_64.whl", hash = 
"sha256:a522427293d77e1c29e303fc282e2d71864579527a04ddcfda6d4f8396c6c36a"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:fbdaec13c5105f0c4e5c52614d04f0bca5f5af007910daa8b6b12095edaa67b3"}, + {file = "bcrypt-4.0.1-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:ca3204d00d3cb2dfed07f2d74a25f12fc12f73e606fcaa6975d1f7ae69cacbb2"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:089098effa1bc35dc055366740a067a2fc76987e8ec75349eb9484061c54f535"}, + {file = "bcrypt-4.0.1-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:e9a51bbfe7e9802b5f3508687758b564069ba937748ad7b9e890086290d2f79e"}, + {file = "bcrypt-4.0.1-cp36-abi3-win32.whl", hash = "sha256:2caffdae059e06ac23fce178d31b4a702f2a3264c20bfb5ff541b338194d8fab"}, + {file = "bcrypt-4.0.1-cp36-abi3-win_amd64.whl", hash = "sha256:8a68f4341daf7522fe8d73874de8906f3a339048ba406be6ddc1b3ccb16fc0d9"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf4fa8b2ca74381bb5442c089350f09a3f17797829d958fad058d6e44d9eb83c"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:67a97e1c405b24f19d08890e7ae0c4f7ce1e56a712a016746c8b2d7732d65d4b"}, + {file = "bcrypt-4.0.1-pp37-pypy37_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b3b85202d95dd568efcb35b53936c5e3b3600c7cdcc6115ba461df3a8e89f38d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbb03eec97496166b704ed663a53680ab57c5084b2fc98ef23291987b525cb7d"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:5ad4d32a28b80c5fa6671ccfb43676e8c1cc232887759d1cd7b6f56ea4355215"}, + {file = "bcrypt-4.0.1-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:b57adba8a1444faf784394de3436233728a1ecaeb6e07e8c22c8848f179b893c"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:705b2cea8a9ed3d55b4491887ceadb0106acf7c6387699fca771af56b1cdeeda"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:2b3ac11cf45161628f1f3733263e63194f22664bf4d0c0f3ab34099c02134665"}, + {file = "bcrypt-4.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:3100851841186c25f127731b9fa11909ab7b1df6fc4b9f8353f4f1fd952fbf71"}, + {file = "bcrypt-4.0.1.tar.gz", hash = "sha256:27d375903ac8261cfe4047f6709d16f7d18d39b1ec92aaf72af989552a650ebd"}, +] + +[package.extras] +tests = ["pytest (>=3.2.1,!=3.3.0)"] +typecheck = ["mypy"] + [[package]] name = "beautifulsoup4" version = "4.12.2" @@ -552,6 +641,19 @@ files = [ [package.dependencies] cffi = ">=1.0.0" +[[package]] +name = "bs4" +version = "0.0.1" +description = "Dummy package for Beautiful Soup" +optional = false +python-versions = "*" +files = [ + {file = "bs4-0.0.1.tar.gz", hash = "sha256:36ecea1fd7cc5c0c6e4a1ff075df26d50da647b75376626cc186e2212886dd3a"}, +] + +[package.dependencies] +beautifulsoup4 = "*" + [[package]] name = "cachetools" version = "5.3.1" @@ -770,6 +872,74 @@ files = [ {file = "charset_normalizer-3.3.0-py3-none-any.whl", hash = "sha256:e46cd37076971c1040fc8c41273a8b3e2c624ce4f2be3f5dfcb7a430c1d3acc2"}, ] +[[package]] +name = "chroma-hnswlib" +version = "0.7.3" +description = "Chromas fork of hnswlib" +optional = false +python-versions = "*" +files = [ + {file = "chroma-hnswlib-0.7.3.tar.gz", hash = "sha256:b6137bedde49fffda6af93b0297fe00429fc61e5a072b1ed9377f909ed95a932"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-macosx_10_9_x86_64.whl", 
hash = "sha256:59d6a7c6f863c67aeb23e79a64001d537060b6995c3eca9a06e349ff7b0998ca"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d71a3f4f232f537b6152947006bd32bc1629a8686df22fd97777b70f416c127a"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c92dc1ebe062188e53970ba13f6b07e0ae32e64c9770eb7f7ffa83f149d4210"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49da700a6656fed8753f68d44b8cc8ae46efc99fc8a22a6d970dc1697f49b403"}, + {file = "chroma_hnswlib-0.7.3-cp310-cp310-win_amd64.whl", hash = "sha256:108bc4c293d819b56476d8f7865803cb03afd6ca128a2a04d678fffc139af029"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:11e7ca93fb8192214ac2b9c0943641ac0daf8f9d4591bb7b73be808a83835667"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6f552e4d23edc06cdeb553cdc757d2fe190cdeb10d43093d6a3319f8d4bf1c6b"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f96f4d5699e486eb1fb95849fe35ab79ab0901265805be7e60f4eaa83ce263ec"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:368e57fe9ebae05ee5844840fa588028a023d1182b0cfdb1d13f607c9ea05756"}, + {file = "chroma_hnswlib-0.7.3-cp311-cp311-win_amd64.whl", hash = "sha256:b7dca27b8896b494456db0fd705b689ac6b73af78e186eb6a42fea2de4f71c6f"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:70f897dc6218afa1d99f43a9ad5eb82f392df31f57ff514ccf4eeadecd62f544"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5aef10b4952708f5a1381c124a29aead0c356f8d7d6e0b520b778aaa62a356f4"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ee2d8d1529fca3898d512079144ec3e28a81d9c17e15e0ea4665697a7923253"}, + {file = "chroma_hnswlib-0.7.3-cp37-cp37m-win_amd64.whl", hash = "sha256:a4021a70e898783cd6f26e00008b494c6249a7babe8774e90ce4766dd288c8ba"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a8f61fa1d417fda848e3ba06c07671f14806a2585272b175ba47501b066fe6b1"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d7563be58bc98e8f0866907368e22ae218d6060601b79c42f59af4eccbbd2e0a"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51b8d411486ee70d7b66ec08cc8b9b6620116b650df9c19076d2d8b6ce2ae914"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d706782b628e4f43f1b8a81e9120ac486837fbd9bcb8ced70fe0d9b95c72d77"}, + {file = "chroma_hnswlib-0.7.3-cp38-cp38-win_amd64.whl", hash = "sha256:54f053dedc0e3ba657f05fec6e73dd541bc5db5b09aa8bc146466ffb734bdc86"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e607c5a71c610a73167a517062d302c0827ccdd6e259af6e4869a5c1306ffb5d"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2358a795870156af6761890f9eb5ca8cade57eb10c5f046fe94dae1faa04b9e"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7cea425df2e6b8a5e201fff0d922a1cc1d165b3cfe762b1408075723c8892218"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:454df3dd3e97aa784fba7cf888ad191e0087eef0fd8c70daf28b753b3b591170"}, + {file = "chroma_hnswlib-0.7.3-cp39-cp39-win_amd64.whl", hash = "sha256:df587d15007ca701c6de0ee7d5585dd5e976b7edd2b30ac72bc376b3c3f85882"}, +] + +[package.dependencies] +numpy = "*" + +[[package]] +name = "chromadb" +version = "0.4.14" +description = "Chroma." +optional = false +python-versions = ">=3.7" +files = [ + {file = "chromadb-0.4.14-py3-none-any.whl", hash = "sha256:c1b59bdfb4b35a40bad0b8927c5ed757adf191ff9db2b9a384dc46a76e1ff10f"}, + {file = "chromadb-0.4.14.tar.gz", hash = "sha256:0fcef603bcf9c854305020c3f8d368c09b1545d48bd2bceefd51861090f87dad"}, +] + +[package.dependencies] +bcrypt = ">=4.0.1" +chroma-hnswlib = "0.7.3" +fastapi = ">=0.95.2" +grpcio = ">=1.58.0" +importlib-resources = "*" +numpy = {version = ">=1.22.5", markers = "python_version >= \"3.8\""} +onnxruntime = ">=1.14.1" +overrides = ">=7.3.1" +posthog = ">=2.4.0" +pulsar-client = ">=3.1.0" +pydantic = ">=1.9" +pypika = ">=0.48.9" +requests = ">=2.28" +tokenizers = ">=0.13.2" +tqdm = ">=4.65.0" +typer = ">=0.9.0" +typing-extensions = ">=4.5.0" +uvicorn = {version = ">=0.18.3", extras = ["standard"]} + [[package]] name = "click" version = "8.1.7" @@ -795,6 +965,40 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] +[[package]] +name = "coloredlogs" +version = "15.0.1" +description = "Colored terminal output for Python's logging module" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, + {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, +] + +[package.dependencies] +humanfriendly = ">=9.1" + +[package.extras] +cron = ["capturer (>=2.4)"] + +[[package]] +name = "colorlog" +version = "6.7.0" +description = "Add colours to the output of Python's logging module." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "colorlog-6.7.0-py2.py3-none-any.whl", hash = "sha256:0d33ca236784a1ba3ff9c532d4964126d8a2c44f1f0cb1d2b0728196f512f662"}, + {file = "colorlog-6.7.0.tar.gz", hash = "sha256:bd94bd21c1e13fac7bd3153f4bc3a7dc0eb0974b8bc2fdf1a989e474f6e582e5"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} + +[package.extras] +development = ["black", "flake8", "mypy", "pytest", "types-colorama"] + [[package]] name = "confection" version = "0.1.3" @@ -814,7 +1018,7 @@ srsly = ">=2.4.0,<3.0.0" name = "contourpy" version = "1.1.0" description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "contourpy-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc"}, @@ -872,7 +1076,7 @@ test-no-images = ["pytest", "pytest-cov", "wurlitzer"] name = "contourpy" version = "1.1.1" description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "contourpy-1.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:46e24f5412c948d81736509377e255f6040e94216bf1a9b5ea1eaa9d29f6ec1b"}, @@ -1021,7 +1225,7 @@ files = [ name = "cycler" version = "0.12.1" description = "Composable style cycles" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, @@ -1078,7 +1282,7 @@ files = [ name = "decorator" version = "5.1.1" description = "Decorators for Humans" -optional = false +optional = true python-versions = ">=3.5" files = [ {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, @@ -1213,7 +1417,7 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] name = "executing" version = "2.0.0" description = "Get the currently executing AST node of a frame, and other information" -optional = false +optional = true python-versions = "*" files = [ {file = "executing-2.0.0-py2.py3-none-any.whl", hash = "sha256:06df6183df67389625f4e763921c6cf978944721abf3e714000200aab95b0657"}, @@ -1274,11 +1478,22 @@ mccabe = ">=0.7.0,<0.8.0" pycodestyle = ">=2.11.0,<2.12.0" pyflakes = ">=3.1.0,<3.2.0" +[[package]] +name = "flatbuffers" +version = "23.5.26" +description = "The FlatBuffers serialization format for Python" +optional = false +python-versions = "*" +files = [ + {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, + {file = "flatbuffers-23.5.26.tar.gz", hash = "sha256:9ea1144cac05ce5d86e2859f431c6cd5e66cd9c78c558317c7955fb8d4c78d89"}, +] + [[package]] name = "fonttools" version = "4.43.1" description = "Tools to manipulate font files" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "fonttools-4.43.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bf11e2cca121df35e295bd34b309046c29476ee739753bc6bc9d5050de319273"}, @@ -1409,6 +1624,41 @@ files = [ {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"}, ] +[[package]] +name = "fsspec" +version = "2023.9.2" +description = "File-system specification" +optional = false +python-versions = ">=3.8" +files = [ + {file = 
"fsspec-2023.9.2-py3-none-any.whl", hash = "sha256:603dbc52c75b84da501b9b2ec8c11e1f61c25984c4a0dda1f129ef391fbfc9b4"}, + {file = "fsspec-2023.9.2.tar.gz", hash = "sha256:80bfb8c70cc27b2178cc62a935ecf242fc6e8c3fb801f9c571fc01b1e715ba7d"}, +] + +[package.extras] +abfs = ["adlfs"] +adl = ["adlfs"] +arrow = ["pyarrow (>=1)"] +dask = ["dask", "distributed"] +devel = ["pytest", "pytest-cov"] +dropbox = ["dropbox", "dropboxdrivefs", "requests"] +full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"] +fuse = ["fusepy"] +gcs = ["gcsfs"] +git = ["pygit2"] +github = ["requests"] +gs = ["gcsfs"] +gui = ["panel"] +hdfs = ["pyarrow (>=1)"] +http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"] +libarchive = ["libarchive-c"] +oci = ["ocifs"] +s3 = ["s3fs"] +sftp = ["paramiko"] +smb = ["smbprotocol"] +ssh = ["paramiko"] +tqdm = ["tqdm"] + [[package]] name = "ftfy" version = "6.1.1" @@ -1549,6 +1799,148 @@ protobuf = ">=3.19.5,<3.20.0 || >3.20.0,<3.20.1 || >3.20.1,<4.21.1 || >4.21.1,<4 [package.extras] grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] +[[package]] +name = "greenlet" +version = "3.0.0" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.7" +files = [ + {file = "greenlet-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e09dea87cc91aea5500262993cbd484b41edf8af74f976719dd83fe724644cd6"}, + {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47932c434a3c8d3c86d865443fadc1fbf574e9b11d6650b656e602b1797908a"}, + {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bdfaeecf8cc705d35d8e6de324bf58427d7eafb55f67050d8f28053a3d57118c"}, + {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a68d670c8f89ff65c82b936275369e532772eebc027c3be68c6b87ad05ca695"}, + {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ad562a104cd41e9d4644f46ea37167b93190c6d5e4048fcc4b80d34ecb278f"}, + {file = "greenlet-3.0.0-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:02a807b2a58d5cdebb07050efe3d7deaf915468d112dfcf5e426d0564aa3aa4a"}, + {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b1660a15a446206c8545edc292ab5c48b91ff732f91b3d3b30d9a915d5ec4779"}, + {file = "greenlet-3.0.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:813720bd57e193391dfe26f4871186cf460848b83df7e23e6bef698a7624b4c9"}, + {file = "greenlet-3.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:aa15a2ec737cb609ed48902b45c5e4ff6044feb5dcdfcf6fa8482379190330d7"}, + {file = "greenlet-3.0.0-cp310-universal2-macosx_11_0_x86_64.whl", hash = "sha256:7709fd7bb02b31908dc8fd35bfd0a29fc24681d5cc9ac1d64ad07f8d2b7db62f"}, + {file = "greenlet-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:211ef8d174601b80e01436f4e6905aca341b15a566f35a10dd8d1e93f5dbb3b7"}, + {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6512592cc49b2c6d9b19fbaa0312124cd4c4c8a90d28473f86f92685cc5fef8e"}, + {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:871b0a8835f9e9d461b7fdaa1b57e3492dd45398e87324c047469ce2fc9f516c"}, + {file = 
"greenlet-3.0.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b505fcfc26f4148551826a96f7317e02c400665fa0883fe505d4fcaab1dabfdd"}, + {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:123910c58234a8d40eaab595bc56a5ae49bdd90122dde5bdc012c20595a94c14"}, + {file = "greenlet-3.0.0-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:96d9ea57292f636ec851a9bb961a5cc0f9976900e16e5d5647f19aa36ba6366b"}, + {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0b72b802496cccbd9b31acea72b6f87e7771ccfd7f7927437d592e5c92ed703c"}, + {file = "greenlet-3.0.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:527cd90ba3d8d7ae7dceb06fda619895768a46a1b4e423bdb24c1969823b8362"}, + {file = "greenlet-3.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:37f60b3a42d8b5499be910d1267b24355c495064f271cfe74bf28b17b099133c"}, + {file = "greenlet-3.0.0-cp311-universal2-macosx_10_9_universal2.whl", hash = "sha256:c3692ecf3fe754c8c0f2c95ff19626584459eab110eaab66413b1e7425cd84e9"}, + {file = "greenlet-3.0.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:be557119bf467d37a8099d91fbf11b2de5eb1fd5fc5b91598407574848dc910f"}, + {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73b2f1922a39d5d59cc0e597987300df3396b148a9bd10b76a058a2f2772fc04"}, + {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d1e22c22f7826096ad503e9bb681b05b8c1f5a8138469b255eb91f26a76634f2"}, + {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d363666acc21d2c204dd8705c0e0457d7b2ee7a76cb16ffc099d6799744ac99"}, + {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:334ef6ed8337bd0b58bb0ae4f7f2dcc84c9f116e474bb4ec250a8bb9bd797a66"}, + {file = "greenlet-3.0.0-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6672fdde0fd1a60b44fb1751a7779c6db487e42b0cc65e7caa6aa686874e79fb"}, + {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:952256c2bc5b4ee8df8dfc54fc4de330970bf5d79253c863fb5e6761f00dda35"}, + {file = "greenlet-3.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:269d06fa0f9624455ce08ae0179430eea61085e3cf6457f05982b37fd2cefe17"}, + {file = "greenlet-3.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:9adbd8ecf097e34ada8efde9b6fec4dd2a903b1e98037adf72d12993a1c80b51"}, + {file = "greenlet-3.0.0-cp312-universal2-macosx_10_9_universal2.whl", hash = "sha256:553d6fb2324e7f4f0899e5ad2c427a4579ed4873f42124beba763f16032959af"}, + {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6b5ce7f40f0e2f8b88c28e6691ca6806814157ff05e794cdd161be928550f4c"}, + {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ecf94aa539e97a8411b5ea52fc6ccd8371be9550c4041011a091eb8b3ca1d810"}, + {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80dcd3c938cbcac986c5c92779db8e8ce51a89a849c135172c88ecbdc8c056b7"}, + {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e52a712c38e5fb4fd68e00dc3caf00b60cb65634d50e32281a9d6431b33b4af1"}, + {file = "greenlet-3.0.0-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5539f6da3418c3dc002739cb2bb8d169056aa66e0c83f6bacae0cd3ac26b423"}, + {file = 
"greenlet-3.0.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:343675e0da2f3c69d3fb1e894ba0a1acf58f481f3b9372ce1eb465ef93cf6fed"}, + {file = "greenlet-3.0.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:abe1ef3d780de56defd0c77c5ba95e152f4e4c4e12d7e11dd8447d338b85a625"}, + {file = "greenlet-3.0.0-cp37-cp37m-win32.whl", hash = "sha256:e693e759e172fa1c2c90d35dea4acbdd1d609b6936115d3739148d5e4cd11947"}, + {file = "greenlet-3.0.0-cp37-cp37m-win_amd64.whl", hash = "sha256:bdd696947cd695924aecb3870660b7545a19851f93b9d327ef8236bfc49be705"}, + {file = "greenlet-3.0.0-cp37-universal2-macosx_11_0_x86_64.whl", hash = "sha256:cc3e2679ea13b4de79bdc44b25a0c4fcd5e94e21b8f290791744ac42d34a0353"}, + {file = "greenlet-3.0.0-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:63acdc34c9cde42a6534518e32ce55c30f932b473c62c235a466469a710bfbf9"}, + {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a1a6244ff96343e9994e37e5b4839f09a0207d35ef6134dce5c20d260d0302c"}, + {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b822fab253ac0f330ee807e7485769e3ac85d5eef827ca224feaaefa462dc0d0"}, + {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8060b32d8586e912a7b7dac2d15b28dbbd63a174ab32f5bc6d107a1c4143f40b"}, + {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:621fcb346141ae08cb95424ebfc5b014361621b8132c48e538e34c3c93ac7365"}, + {file = "greenlet-3.0.0-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6bb36985f606a7c49916eff74ab99399cdfd09241c375d5a820bb855dfb4af9f"}, + {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:10b5582744abd9858947d163843d323d0b67be9432db50f8bf83031032bc218d"}, + {file = "greenlet-3.0.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f351479a6914fd81a55c8e68963609f792d9b067fb8a60a042c585a621e0de4f"}, + {file = "greenlet-3.0.0-cp38-cp38-win32.whl", hash = "sha256:9de687479faec7db5b198cc365bc34addd256b0028956501f4d4d5e9ca2e240a"}, + {file = "greenlet-3.0.0-cp38-cp38-win_amd64.whl", hash = "sha256:3fd2b18432e7298fcbec3d39e1a0aa91ae9ea1c93356ec089421fabc3651572b"}, + {file = "greenlet-3.0.0-cp38-universal2-macosx_11_0_x86_64.whl", hash = "sha256:3c0d36f5adc6e6100aedbc976d7428a9f7194ea79911aa4bf471f44ee13a9464"}, + {file = "greenlet-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4cd83fb8d8e17633ad534d9ac93719ef8937568d730ef07ac3a98cb520fd93e4"}, + {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a5b2d4cdaf1c71057ff823a19d850ed5c6c2d3686cb71f73ae4d6382aaa7a06"}, + {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e7dcdfad252f2ca83c685b0fa9fba00e4d8f243b73839229d56ee3d9d219314"}, + {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c94e4e924d09b5a3e37b853fe5924a95eac058cb6f6fb437ebb588b7eda79870"}, + {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6fb737e46b8bd63156b8f59ba6cdef46fe2b7db0c5804388a2d0519b8ddb99"}, + {file = "greenlet-3.0.0-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d55db1db455c59b46f794346efce896e754b8942817f46a1bada2d29446e305a"}, + {file = "greenlet-3.0.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:56867a3b3cf26dc8a0beecdb4459c59f4c47cdd5424618c08515f682e1d46692"}, + {file = 
"greenlet-3.0.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9a812224a5fb17a538207e8cf8e86f517df2080c8ee0f8c1ed2bdaccd18f38f4"}, + {file = "greenlet-3.0.0-cp39-cp39-win32.whl", hash = "sha256:0d3f83ffb18dc57243e0151331e3c383b05e5b6c5029ac29f754745c800f8ed9"}, + {file = "greenlet-3.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:831d6f35037cf18ca5e80a737a27d822d87cd922521d18ed3dbc8a6967be50ce"}, + {file = "greenlet-3.0.0-cp39-universal2-macosx_11_0_x86_64.whl", hash = "sha256:a048293392d4e058298710a54dfaefcefdf49d287cd33fb1f7d63d55426e4355"}, + {file = "greenlet-3.0.0.tar.gz", hash = "sha256:19834e3f91f485442adc1ee440171ec5d9a4840a1f7bd5ed97833544719ce10b"}, +] + +[package.extras] +docs = ["Sphinx"] +test = ["objgraph", "psutil"] + +[[package]] +name = "grpcio" +version = "1.59.0" +description = "HTTP/2-based RPC framework" +optional = false +python-versions = ">=3.7" +files = [ + {file = "grpcio-1.59.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:225e5fa61c35eeaebb4e7491cd2d768cd8eb6ed00f2664fa83a58f29418b39fd"}, + {file = "grpcio-1.59.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b95ec8ecc4f703f5caaa8d96e93e40c7f589bad299a2617bdb8becbcce525539"}, + {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:1a839ba86764cc48226f50b924216000c79779c563a301586a107bda9cbe9dcf"}, + {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f6cfe44a5d7c7d5f1017a7da1c8160304091ca5dc64a0f85bca0d63008c3137a"}, + {file = "grpcio-1.59.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0fcf53df684fcc0154b1e61f6b4a8c4cf5f49d98a63511e3f30966feff39cd0"}, + {file = "grpcio-1.59.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa66cac32861500f280bb60fe7d5b3e22d68c51e18e65367e38f8669b78cea3b"}, + {file = "grpcio-1.59.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8cd2d38c2d52f607d75a74143113174c36d8a416d9472415eab834f837580cf7"}, + {file = "grpcio-1.59.0-cp310-cp310-win32.whl", hash = "sha256:228b91ce454876d7eed74041aff24a8f04c0306b7250a2da99d35dd25e2a1211"}, + {file = "grpcio-1.59.0-cp310-cp310-win_amd64.whl", hash = "sha256:ca87ee6183421b7cea3544190061f6c1c3dfc959e0b57a5286b108511fd34ff4"}, + {file = "grpcio-1.59.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:c173a87d622ea074ce79be33b952f0b424fa92182063c3bda8625c11d3585d09"}, + {file = "grpcio-1.59.0-cp311-cp311-macosx_10_10_universal2.whl", hash = "sha256:ec78aebb9b6771d6a1de7b6ca2f779a2f6113b9108d486e904bde323d51f5589"}, + {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:0b84445fa94d59e6806c10266b977f92fa997db3585f125d6b751af02ff8b9fe"}, + {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c251d22de8f9f5cca9ee47e4bade7c5c853e6e40743f47f5cc02288ee7a87252"}, + {file = "grpcio-1.59.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:956f0b7cb465a65de1bd90d5a7475b4dc55089b25042fe0f6c870707e9aabb1d"}, + {file = "grpcio-1.59.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:38da5310ef84e16d638ad89550b5b9424df508fd5c7b968b90eb9629ca9be4b9"}, + {file = "grpcio-1.59.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:63982150a7d598281fa1d7ffead6096e543ff8be189d3235dd2b5604f2c553e5"}, + {file = "grpcio-1.59.0-cp311-cp311-win32.whl", hash = "sha256:50eff97397e29eeee5df106ea1afce3ee134d567aa2c8e04fabab05c79d791a7"}, + {file = "grpcio-1.59.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:15f03bd714f987d48ae57fe092cf81960ae36da4e520e729392a59a75cda4f29"}, + {file = "grpcio-1.59.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f1feb034321ae2f718172d86b8276c03599846dc7bb1792ae370af02718f91c5"}, + {file = "grpcio-1.59.0-cp312-cp312-macosx_10_10_universal2.whl", hash = "sha256:d09bd2a4e9f5a44d36bb8684f284835c14d30c22d8ec92ce796655af12163588"}, + {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:2f120d27051e4c59db2f267b71b833796770d3ea36ca712befa8c5fff5da6ebd"}, + {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba0ca727a173ee093f49ead932c051af463258b4b493b956a2c099696f38aa66"}, + {file = "grpcio-1.59.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5711c51e204dc52065f4a3327dca46e69636a0b76d3e98c2c28c4ccef9b04c52"}, + {file = "grpcio-1.59.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:d74f7d2d7c242a6af9d4d069552ec3669965b74fed6b92946e0e13b4168374f9"}, + {file = "grpcio-1.59.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3859917de234a0a2a52132489c4425a73669de9c458b01c9a83687f1f31b5b10"}, + {file = "grpcio-1.59.0-cp312-cp312-win32.whl", hash = "sha256:de2599985b7c1b4ce7526e15c969d66b93687571aa008ca749d6235d056b7205"}, + {file = "grpcio-1.59.0-cp312-cp312-win_amd64.whl", hash = "sha256:598f3530231cf10ae03f4ab92d48c3be1fee0c52213a1d5958df1a90957e6a88"}, + {file = "grpcio-1.59.0-cp37-cp37m-linux_armv7l.whl", hash = "sha256:b34c7a4c31841a2ea27246a05eed8a80c319bfc0d3e644412ec9ce437105ff6c"}, + {file = "grpcio-1.59.0-cp37-cp37m-macosx_10_10_universal2.whl", hash = "sha256:c4dfdb49f4997dc664f30116af2d34751b91aa031f8c8ee251ce4dcfc11277b0"}, + {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_aarch64.whl", hash = "sha256:61bc72a00ecc2b79d9695220b4d02e8ba53b702b42411397e831c9b0589f08a3"}, + {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f367e4b524cb319e50acbdea57bb63c3b717c5d561974ace0b065a648bb3bad3"}, + {file = "grpcio-1.59.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:849c47ef42424c86af069a9c5e691a765e304079755d5c29eff511263fad9c2a"}, + {file = "grpcio-1.59.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c0488c2b0528e6072010182075615620071371701733c63ab5be49140ed8f7f0"}, + {file = "grpcio-1.59.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:611d9aa0017fa386809bddcb76653a5ab18c264faf4d9ff35cb904d44745f575"}, + {file = "grpcio-1.59.0-cp37-cp37m-win_amd64.whl", hash = "sha256:e5378785dce2b91eb2e5b857ec7602305a3b5cf78311767146464bfa365fc897"}, + {file = "grpcio-1.59.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:fe976910de34d21057bcb53b2c5e667843588b48bf11339da2a75f5c4c5b4055"}, + {file = "grpcio-1.59.0-cp38-cp38-macosx_10_10_universal2.whl", hash = "sha256:c041a91712bf23b2a910f61e16565a05869e505dc5a5c025d429ca6de5de842c"}, + {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:0ae444221b2c16d8211b55326f8ba173ba8f8c76349bfc1768198ba592b58f74"}, + {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceb1e68135788c3fce2211de86a7597591f0b9a0d2bb80e8401fd1d915991bac"}, + {file = "grpcio-1.59.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c4b1cc3a9dc1924d2eb26eec8792fedd4b3fcd10111e26c1d551f2e4eda79ce"}, + {file = "grpcio-1.59.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:871371ce0c0055d3db2a86fdebd1e1d647cf21a8912acc30052660297a5a6901"}, + {file = 
"grpcio-1.59.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:93e9cb546e610829e462147ce724a9cb108e61647a3454500438a6deef610be1"}, + {file = "grpcio-1.59.0-cp38-cp38-win32.whl", hash = "sha256:f21917aa50b40842b51aff2de6ebf9e2f6af3fe0971c31960ad6a3a2b24988f4"}, + {file = "grpcio-1.59.0-cp38-cp38-win_amd64.whl", hash = "sha256:14890da86a0c0e9dc1ea8e90101d7a3e0e7b1e71f4487fab36e2bfd2ecadd13c"}, + {file = "grpcio-1.59.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:34341d9e81a4b669a5f5dca3b2a760b6798e95cdda2b173e65d29d0b16692857"}, + {file = "grpcio-1.59.0-cp39-cp39-macosx_10_10_universal2.whl", hash = "sha256:986de4aa75646e963466b386a8c5055c8b23a26a36a6c99052385d6fe8aaf180"}, + {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:aca8a24fef80bef73f83eb8153f5f5a0134d9539b4c436a716256b311dda90a6"}, + {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:936b2e04663660c600d5173bc2cc84e15adbad9c8f71946eb833b0afc205b996"}, + {file = "grpcio-1.59.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc8bf2e7bc725e76c0c11e474634a08c8f24bcf7426c0c6d60c8f9c6e70e4d4a"}, + {file = "grpcio-1.59.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:81d86a096ccd24a57fa5772a544c9e566218bc4de49e8c909882dae9d73392df"}, + {file = "grpcio-1.59.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2ea95cd6abbe20138b8df965b4a8674ec312aaef3147c0f46a0bac661f09e8d0"}, + {file = "grpcio-1.59.0-cp39-cp39-win32.whl", hash = "sha256:3b8ff795d35a93d1df6531f31c1502673d1cebeeba93d0f9bd74617381507e3f"}, + {file = "grpcio-1.59.0-cp39-cp39-win_amd64.whl", hash = "sha256:38823bd088c69f59966f594d087d3a929d1ef310506bee9e3648317660d65b81"}, + {file = "grpcio-1.59.0.tar.gz", hash = "sha256:acf70a63cf09dd494000007b798aff88a436e1c03b394995ce450be437b8e54f"}, +] + +[package.extras] +protobuf = ["grpcio-tools (>=1.59.0)"] + [[package]] name = "gtts" version = "2.4.0" @@ -1598,7 +1990,7 @@ hyperframe = ">=6.0,<7" name = "helicone" version = "1.0.12" description = "A Python wrapper for the OpenAI API that logs all requests to Helicone." -optional = false +optional = true python-versions = ">=3.8.1" files = [ {file = "helicone-1.0.12-py3-none-any.whl", hash = "sha256:5971c2ca310de925f5c6746e0f90f28699b0666e8603fd3f2dfa21acf8712293"}, @@ -1656,6 +2048,54 @@ files = [ [package.dependencies] pyparsing = {version = ">=2.4.2,<3.0.0 || >3.0.0,<3.0.1 || >3.0.1,<3.0.2 || >3.0.2,<3.0.3 || >3.0.3,<4", markers = "python_version > \"3.0\""} +[[package]] +name = "httptools" +version = "0.6.1" +description = "A collection of framework independent HTTP protocol utils." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"}, + {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"}, + {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"}, + {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"}, + {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"}, + {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"}, + {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"}, + {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"}, + {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"}, + {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"}, + {file = "httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"}, + {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"}, + {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = 
"sha256:8e216a038d2d52ea13fdd9b9c9c7459fb80d78302b257828285eca1c773b99b3"}, + {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3e802e0b2378ade99cd666b5bffb8b2a7cc8f3d28988685dc300469ea8dd86cb"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd3e488b447046e386a30f07af05f9b38d3d368d1f7b4d8f7e10af85393db97"}, + {file = "httptools-0.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe467eb086d80217b7584e61313ebadc8d187a4d95bb62031b7bab4b205c3ba3"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3c3b214ce057c54675b00108ac42bacf2ab8f85c58e3f324a4e963bbc46424f4"}, + {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ae5b97f690badd2ca27cbf668494ee1b6d34cf1c464271ef7bfa9ca6b83ffaf"}, + {file = "httptools-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:405784577ba6540fa7d6ff49e37daf104e04f4b4ff2d1ac0469eaa6a20fde084"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:95fb92dd3649f9cb139e9c56604cc2d7c7bf0fc2e7c8d7fbd58f96e35eddd2a3"}, + {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dcbab042cc3ef272adc11220517278519adf8f53fd3056d0e68f0a6f891ba94e"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf2372e98406efb42e93bfe10f2948e467edfd792b015f1b4ecd897903d3e8d"}, + {file = "httptools-0.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678fcbae74477a17d103b7cae78b74800d795d702083867ce160fc202104d0da"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e0b281cf5a125c35f7f6722b65d8542d2e57331be573e9e88bc8b0115c4a7a81"}, + {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:95658c342529bba4e1d3d2b1a874db16c7cca435e8827422154c9da76ac4e13a"}, + {file = "httptools-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ebaec1bf683e4bf5e9fbb49b8cc36da482033596a415b3e4ebab5a4c0d7ec5e"}, + {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"}, +] + +[package.extras] +test = ["Cython (>=0.29.24,<0.30.0)"] + [[package]] name = "httpx" version = "0.24.1" @@ -1683,6 +2123,77 @@ cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] +[[package]] +name = "huggingface-hub" +version = "0.17.3" +description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "huggingface_hub-0.17.3-py3-none-any.whl", hash = "sha256:545eb3665f6ac587add946e73984148f2ea5c7877eac2e845549730570c1933a"}, + {file = "huggingface_hub-0.17.3.tar.gz", hash = "sha256:40439632b211311f788964602bf8b0d9d6b7a2314fba4e8d67b2ce3ecea0e3fd"}, +] + +[package.dependencies] +filelock = "*" +fsspec = "*" +packaging = ">=20.9" +pyyaml = ">=5.1" +requests = "*" +tqdm = ">=4.42.1" +typing-extensions = ">=3.7.4.3" + +[package.extras] +all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 
(<2.0)"] +cli = ["InquirerPy (==0.3.4)"] +dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"] +docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (==23.7)", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)", "watchdog"] +fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"] +inference = ["aiohttp", "pydantic (<2.0)"] +quality = ["black (==23.7)", "mypy (==1.5.1)", "ruff (>=0.0.241)"] +tensorflow = ["graphviz", "pydot", "tensorflow"] +testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (<2.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"] +torch = ["torch"] +typing = ["pydantic (<2.0)", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"] + +[[package]] +name = "humanfriendly" +version = "10.0" +description = "Human friendly output for text interfaces using Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, + {file = "humanfriendly-10.0.tar.gz", hash = "sha256:6b0b831ce8f15f7300721aa49829fc4e83921a9a301cc7f606be6686a2288ddc"}, +] + +[package.dependencies] +pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_version >= \"3.8\""} + +[[package]] +name = "hypercorn" +version = "0.14.4" +description = "A ASGI Server based on Hyper libraries and inspired by Gunicorn" +optional = false +python-versions = ">=3.7" +files = [ + {file = "hypercorn-0.14.4-py3-none-any.whl", hash = "sha256:f956200dbf8677684e6e976219ffa6691d6cf795281184b41dbb0b135ab37b8d"}, + {file = "hypercorn-0.14.4.tar.gz", hash = "sha256:3fa504efc46a271640023c9b88c3184fd64993f47a282e8ae1a13ccb285c2f67"}, +] + +[package.dependencies] +h11 = "*" +h2 = ">=3.1.0" +priority = "*" +tomli = {version = "*", markers = "python_version < \"3.11\""} +wsproto = ">=0.14.0" + +[package.extras] +docs = ["pydata_sphinx_theme"] +h3 = ["aioquic (>=0.9.0,<1.0)"] +trio = ["exceptiongroup (>=1.1.0)", "trio (>=0.22.0)"] +uvloop = ["uvloop"] + [[package]] name = "hyperframe" version = "6.0.1" @@ -1719,6 +2230,40 @@ files = [ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] +[[package]] +name = "importlib-metadata" +version = "6.8.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-6.8.0-py3-none-any.whl", hash = "sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb"}, + {file = "importlib_metadata-6.8.0.tar.gz", hash = "sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", 
"jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "importlib-resources" +version = "6.1.0" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.1.0-py3-none-any.whl", hash = "sha256:aa50258bbfa56d4e33fbd8aa3ef48ded10d1735f11532b8df95388cc6bdb7e83"}, + {file = "importlib_resources-6.1.0.tar.gz", hash = "sha256:9d48dcccc213325e810fd723e7fbb45ccb39f6cf5c31f00cf2b965f5f10f3cb9"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] + [[package]] name = "inflection" version = "0.5.1" @@ -1745,7 +2290,7 @@ files = [ name = "ipython" version = "8.16.1" description = "IPython: Productive Interactive Computing" -optional = false +optional = true python-versions = ">=3.9" files = [ {file = "ipython-8.16.1-py3-none-any.whl", hash = "sha256:0852469d4d579d9cd613c220af7bf0c9cc251813e12be647cb9d463939db9b1e"}, @@ -1801,7 +2346,7 @@ requirements-deprecated-finder = ["pip-api", "pipreqs"] name = "jedi" version = "0.19.1" description = "An autocompletion tool for Python that can be used for text editors." -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, @@ -1837,7 +2382,7 @@ i18n = ["Babel (>=2.7)"] name = "jsonpickle" version = "3.0.2" description = "Python library for serializing any arbitrary object graph into JSON" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "jsonpickle-3.0.2-py3-none-any.whl", hash = "sha256:4a8442d97ca3f77978afa58068768dba7bff2dbabe79a9647bc3cdafd4ef019f"}, @@ -1888,7 +2433,7 @@ referencing = ">=0.28.0" name = "kiwisolver" version = "1.4.5" description = "A fast implementation of the Cassowary constraint solver" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, @@ -2011,11 +2556,32 @@ files = [ [package.extras] data = ["language-data (>=1.1,<2.0)"] +[[package]] +name = "litellm" +version = "0.1.824" +description = "Library to easily interface with LLM API providers" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "litellm-0.1.824-py3-none-any.whl", hash = "sha256:3aed4d3d849b8e518aaafc62758a548111e4502bad7fbcd69581bfeb10bc016a"}, + {file = "litellm-0.1.824.tar.gz", hash = "sha256:8c4dd49d1d996a6b953f39271608139e96ac25c43b91dd32188367b7d233f584"}, +] + +[package.dependencies] +appdirs = ">=1.4.4,<2.0.0" +click = "*" +importlib-metadata = ">=6.8.0" +jinja2 = ">=3.1.2,<4.0.0" +openai = ">=0.27.0,<0.29.0" +python-dotenv = ">=0.2.0" +tiktoken = ">=0.4.0" +tokenizers = "*" + [[package]] name = "lockfile" version = "0.12.2" description = "Platform-independent file locking module" -optional = false +optional = true 
python-versions = "*" files = [ {file = "lockfile-0.12.2-py2.py3-none-any.whl", hash = "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa"}, @@ -2235,7 +2801,7 @@ files = [ name = "matplotlib" version = "3.8.0" description = "Python plotting package" -optional = false +optional = true python-versions = ">=3.9" files = [ {file = "matplotlib-3.8.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c4940bad88a932ddc69734274f6fb047207e008389489f2b6f77d9ca485f0e7a"}, @@ -2284,7 +2850,7 @@ setuptools_scm = ">=7" name = "matplotlib-inline" version = "0.1.6" description = "Inline Matplotlib backend for Jupyter" -optional = false +optional = true python-versions = ">=3.5" files = [ {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, @@ -2305,6 +2871,34 @@ files = [ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, ] +[[package]] +name = "monotonic" +version = "1.6" +description = "An implementation of time.monotonic() for Python 2 & < 3.3" +optional = false +python-versions = "*" +files = [ + {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, + {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +description = "Python library for arbitrary-precision floating-point arithmetic" +optional = false +python-versions = "*" +files = [ + {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, + {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, +] + +[package.extras] +develop = ["codecov", "pycodestyle", "pytest (>=4.6)", "pytest-cov", "wheel"] +docs = ["sphinx"] +gmpy = ["gmpy2 (>=2.1.0a4)"] +tests = ["pytest (>=4.6)"] + [[package]] name = "multidict" version = "6.0.4" @@ -2491,7 +3085,7 @@ files = [ name = "networkx" version = "3.1" description = "Python package for creating and manipulating graphs and networks" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "networkx-3.1-py3-none-any.whl", hash = "sha256:4f33f68cb2afcf86f28a45f43efc27a9386b535d567d2127f8f61d51dec58d36"}, @@ -2553,6 +3147,47 @@ files = [ {file = "numpy-1.25.2.tar.gz", hash = "sha256:fd608e19c8d7c55021dffd43bfe5492fab8cc105cc8986f813f8c3c048b38760"}, ] +[[package]] +name = "onnxruntime" +version = "1.16.1" +description = "ONNX Runtime is a runtime accelerator for Machine Learning models" +optional = false +python-versions = "*" +files = [ + {file = "onnxruntime-1.16.1-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:28b2c7f444b4119950b69370801cd66067f403d19cbaf2a444735d7c269cce4a"}, + {file = "onnxruntime-1.16.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c24e04f33e7899f6aebb03ed51e51d346c1f906b05c5569d58ac9a12d38a2f58"}, + {file = "onnxruntime-1.16.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fa93b166f2d97063dc9f33c5118c5729a4a5dd5617296b6dbef42f9047b3e81"}, + {file = "onnxruntime-1.16.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:042dd9201b3016ee18f8f8bc4609baf11ff34ca1ff489c0a46bcd30919bf883d"}, + {file = "onnxruntime-1.16.1-cp310-cp310-win32.whl", hash = "sha256:c20aa0591f305012f1b21aad607ed96917c86ae7aede4a4dd95824b3d124ceb7"}, + {file = 
"onnxruntime-1.16.1-cp310-cp310-win_amd64.whl", hash = "sha256:5581873e578917bea76d6434ee7337e28195d03488dcf72d161d08e9398c6249"}, + {file = "onnxruntime-1.16.1-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:ef8c0c8abf5f309aa1caf35941380839dc5f7a2fa53da533be4a3f254993f120"}, + {file = "onnxruntime-1.16.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e680380bea35a137cbc3efd67a17486e96972901192ad3026ee79c8d8fe264f7"}, + {file = "onnxruntime-1.16.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e62cc38ce1a669013d0a596d984762dc9c67c56f60ecfeee0d5ad36da5863f6"}, + {file = "onnxruntime-1.16.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:025c7a4d57bd2e63b8a0f84ad3df53e419e3df1cc72d63184f2aae807b17c13c"}, + {file = "onnxruntime-1.16.1-cp311-cp311-win32.whl", hash = "sha256:9ad074057fa8d028df248b5668514088cb0937b6ac5954073b7fb9b2891ffc8c"}, + {file = "onnxruntime-1.16.1-cp311-cp311-win_amd64.whl", hash = "sha256:d5e43a3478bffc01f817ecf826de7b25a2ca1bca8547d70888594ab80a77ad24"}, + {file = "onnxruntime-1.16.1-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:3aef4d70b0930e29a8943eab248cd1565664458d3a62b2276bd11181f28fd0a3"}, + {file = "onnxruntime-1.16.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:55a7b843a57c8ca0c8ff169428137958146081d5d76f1a6dd444c4ffcd37c3c2"}, + {file = "onnxruntime-1.16.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c631af1941bf3b5f7d063d24c04aacce8cff0794e157c497e315e89ac5ad7b"}, + {file = "onnxruntime-1.16.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5671f296c3d5c233f601e97a10ab5a1dd8e65ba35c7b7b0c253332aba9dff330"}, + {file = "onnxruntime-1.16.1-cp38-cp38-win32.whl", hash = "sha256:eb3802305023dd05e16848d4e22b41f8147247894309c0c27122aaa08793b3d2"}, + {file = "onnxruntime-1.16.1-cp38-cp38-win_amd64.whl", hash = "sha256:fecfb07443d09d271b1487f401fbdf1ba0c829af6fd4fe8f6af25f71190e7eb9"}, + {file = "onnxruntime-1.16.1-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:de3e12094234db6545c67adbf801874b4eb91e9f299bda34c62967ef0050960f"}, + {file = "onnxruntime-1.16.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ff723c2a5621b5e7103f3be84d5aae1e03a20621e72219dddceae81f65f240af"}, + {file = "onnxruntime-1.16.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14a7fb3073aaf6b462e3d7fb433320f7700558a8892e5021780522dc4574292a"}, + {file = "onnxruntime-1.16.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:963159f1f699b0454cd72fcef3276c8a1aab9389a7b301bcd8e320fb9d9e8597"}, + {file = "onnxruntime-1.16.1-cp39-cp39-win32.whl", hash = "sha256:85771adb75190db9364b25ddec353ebf07635b83eb94b64ed014f1f6d57a3857"}, + {file = "onnxruntime-1.16.1-cp39-cp39-win_amd64.whl", hash = "sha256:d32d2b30799c1f950123c60ae8390818381fd5f88bdf3627eeca10071c155dc5"}, +] + +[package.dependencies] +coloredlogs = "*" +flatbuffers = "*" +numpy = ">=1.21.6" +packaging = "*" +protobuf = "*" +sympy = "*" + [[package]] name = "openai" version = "0.27.10" @@ -2673,6 +3308,17 @@ files = [ [package.dependencies] attrs = ">=19.2.0" +[[package]] +name = "overrides" +version = "7.4.0" +description = "A decorator to automatically detect mismatch when overriding a method." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, + {file = "overrides-7.4.0.tar.gz", hash = "sha256:9502a3cca51f4fac40b5feca985b6703a5c1f6ad815588a7ca9e285b9dca6757"}, +] + [[package]] name = "packaging" version = "23.2" @@ -2688,7 +3334,7 @@ files = [ name = "pandas" version = "2.1.0" description = "Powerful data structures for data analysis, time series, and statistics" -optional = false +optional = true python-versions = ">=3.9" files = [ {file = "pandas-2.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:40dd20439ff94f1b2ed55b393ecee9cb6f3b08104c2c40b0cb7186a2f0046242"}, @@ -2746,7 +3392,7 @@ xml = ["lxml (>=4.8.0)"] name = "pandas" version = "2.1.1" description = "Powerful data structures for data analysis, time series, and statistics" -optional = false +optional = true python-versions = ">=3.9" files = [ {file = "pandas-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58d997dbee0d4b64f3cb881a24f918b5f25dd64ddf31f467bb9b67ae4c63a1e4"}, @@ -2813,7 +3459,7 @@ xml = ["lxml (>=4.8.0)"] name = "parso" version = "0.8.3" description = "A Python Parser" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, @@ -2861,7 +3507,7 @@ test = ["mock", "pytest", "pytest-coverage", "typer-cli"] name = "pexpect" version = "4.8.0" description = "Pexpect allows easy control of interactive console applications." -optional = false +optional = true python-versions = "*" files = [ {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, @@ -2875,7 +3521,7 @@ ptyprocess = ">=0.5" name = "pickleshare" version = "0.7.5" description = "Tiny 'shelve'-like database with concurrency support" -optional = false +optional = true python-versions = "*" files = [ {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, @@ -3014,6 +3660,29 @@ files = [ dev = ["pre-commit", "tox"] testing = ["pytest", "pytest-benchmark"] +[[package]] +name = "posthog" +version = "3.0.2" +description = "Integrate PostHog into any python application." 
+optional = false +python-versions = "*" +files = [ + {file = "posthog-3.0.2-py2.py3-none-any.whl", hash = "sha256:a8c0af6f2401fbe50f90e68c4143d0824b54e872de036b1c2f23b5abb39d88ce"}, + {file = "posthog-3.0.2.tar.gz", hash = "sha256:701fba6e446a4de687c6e861b587e7b7741955ad624bf34fe013c06a0fec6fb3"}, +] + +[package.dependencies] +backoff = ">=1.10.0" +monotonic = ">=1.5" +python-dateutil = ">2.1" +requests = ">=2.7,<3.0" +six = ">=1.5" + +[package.extras] +dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"] +sentry = ["django", "sentry-sdk"] +test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest"] + [[package]] name = "pre-commit" version = "3.5.0" @@ -3078,6 +3747,17 @@ files = [ cymem = ">=2.0.2,<2.1.0" murmurhash = ">=0.28.0,<1.1.0" +[[package]] +name = "priority" +version = "2.0.0" +description = "A pure-Python implementation of the HTTP/2 priority tree" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "priority-2.0.0-py3-none-any.whl", hash = "sha256:6f8eefce5f3ad59baf2c080a664037bb4725cd0a790d53d59ab4059288faf6aa"}, + {file = "priority-2.0.0.tar.gz", hash = "sha256:c965d54f1b8d0d0b19479db3924c7c36cf672dbf2aec92d43fbdaf4492ba18c0"}, +] + [[package]] name = "prompt-toolkit" version = "3.0.39" @@ -3118,7 +3798,7 @@ files = [ name = "psutil" version = "5.9.6" description = "Cross-platform lib for process and system monitoring in Python." -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" files = [ {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"}, @@ -3146,18 +3826,65 @@ test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" -optional = false +optional = true python-versions = "*" files = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, ] +[[package]] +name = "pulsar-client" +version = "3.3.0" +description = "Apache Pulsar Python client library" +optional = false +python-versions = "*" +files = [ + {file = "pulsar_client-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:c31afd3e67a044ff93177df89e08febf214cc965e95ede097d9fe8755af00e01"}, + {file = "pulsar_client-3.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f66982284571674b215324cc26b5c2f7c56c7043113c47a7084cb70d67a8afb"}, + {file = "pulsar_client-3.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fe50a06f81c48a75a9b95c27a6446260039adca71d9face273740de96b2efca"}, + {file = "pulsar_client-3.3.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d4c46a4b96a6e9919cfe220156d69a2ede8053d9ea1add4ada108abcf2ba9775"}, + {file = "pulsar_client-3.3.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:1e4b5d44b992c9b036286b483f3588c10b89c6047fb59d80c7474445997f4e10"}, + {file = "pulsar_client-3.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:497a59ac6b650835a3b2c502f53477e5c98e5226998ca3f17c0b0a3eb4d67d08"}, + {file = "pulsar_client-3.3.0-cp311-cp311-macosx_10_15_universal2.whl", hash = "sha256:386e78ff52058d881780bae1f6e84ac9434ae0b01a8581755ca8cc0dc844a332"}, + {file = "pulsar_client-3.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:3e4ecb780df58bcfd3918590bd3ff31ed79bccfbef3a1a60370642eb1e14a9d2"}, + {file = "pulsar_client-3.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ce1e215c252f22a6f26ca5e9076826041a04d88dc213b92c86b524be2774a64"}, + {file = "pulsar_client-3.3.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:88b0fd5be73a4103986b9dbe3a66468cf8829371e34af87ff8f216e3980f4cbe"}, + {file = "pulsar_client-3.3.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:33656450536d83eed1563ff09692c2c415fb199d88e9ed97d701ca446a119e1b"}, + {file = "pulsar_client-3.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:ce33de700b06583df8777e139d68cb4b4b3d0a2eac168d74278d8935f357fb10"}, + {file = "pulsar_client-3.3.0-cp37-cp37m-macosx_10_15_universal2.whl", hash = "sha256:7b5dd25cf778d6c980d36c53081e843ea272afe7af4f0ad6394ae9513f94641b"}, + {file = "pulsar_client-3.3.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33c4e6865fda62a2e460f823dce4d49ac2973a4459b8ff99eda5fdd6aaaebf46"}, + {file = "pulsar_client-3.3.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1810ddc623c8de2675d17405ce47057a9a2b92298e708ce4d9564847f5ad904"}, + {file = "pulsar_client-3.3.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:8259c3b856eb6deaa1f93dce893ab18d99d36d102da5612c8e97a4fb41b70ab1"}, + {file = "pulsar_client-3.3.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:5e7a48b2e505cde758fd51a601b5da0671fa98c9baee38362aaaa3ab2b930c28"}, + {file = "pulsar_client-3.3.0-cp37-cp37m-win_amd64.whl", hash = "sha256:ede264385d47257b2f2b08ecde9181ec5338bea5639cc543d1856f01736778d2"}, + {file = "pulsar_client-3.3.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:0f64c62746ccd5b65a0c505f5f40b9af1f147eb1fa2d8f9c90cd5c8b92dd8597"}, + {file = "pulsar_client-3.3.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b84a20c9012e3c4ef1b7085acd7467197118c090b378dec27d773fb79d91556"}, + {file = "pulsar_client-3.3.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4e15fa696e275ccb66d0791fdc19c4dea0420d81349c8055e485b134125e14f"}, + {file = "pulsar_client-3.3.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:72cbb1bdcba2dd1265296b5ba65331622ee89c16db75edaad46dd7b90c6dd447"}, + {file = "pulsar_client-3.3.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d54dd12955bf587dd46d9184444af5e853d9da2a14bbfb739ed2c7c3b78ce280"}, + {file = "pulsar_client-3.3.0-cp38-cp38-win_amd64.whl", hash = "sha256:43f98afdf0334b2b957a4d96f97a1fe8a7f7fd1e2631d40c3f00b4162f396485"}, + {file = "pulsar_client-3.3.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:efe7c1e6a96daccc522c3567b6847ffa54c13e0f510d9a427b4aeff9fbebe54b"}, + {file = "pulsar_client-3.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f28e94420090fceeb38e23fc744f3edf8710e48314ef5927d2b674a1d1e43ee0"}, + {file = "pulsar_client-3.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42c8f3eaa98e2351805ecb6efb6d5fedf47a314a3ce6af0e05ea1449ea7244ed"}, + {file = "pulsar_client-3.3.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:5e69750f8ae57e55fddf97b459ce0d8b38b2bb85f464a71e871ee6a86d893be7"}, + {file = "pulsar_client-3.3.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7e147e5ba460c1818bc05254279a885b4e552bcafb8961d40e31f98d5ff46628"}, + {file = "pulsar_client-3.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:694530af1d6c75fb81456fb509778c1868adee31e997ddece6e21678200182ea"}, +] + 
+[package.dependencies] +certifi = "*" + +[package.extras] +all = ["apache-bookkeeper-client (>=4.16.1)", "fastavro (==1.7.3)", "grpcio (>=1.8.2)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] +avro = ["fastavro (==1.7.3)"] +functions = ["apache-bookkeeper-client (>=4.16.1)", "grpcio (>=1.8.2)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] + [[package]] name = "pure-eval" version = "0.2.2" description = "Safely evaluate AST nodes without side effects" -optional = false +optional = true python-versions = "*" files = [ {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, @@ -3292,7 +4019,7 @@ files = [ name = "pygments" version = "2.16.1" description = "Pygments is a syntax highlighting package written in Python." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "Pygments-2.16.1-py3-none-any.whl", hash = "sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692"}, @@ -3306,7 +4033,7 @@ plugins = ["importlib-metadata"] name = "pyhumps" version = "3.8.0" description = "🐫 Convert strings (and dictionary keys) between snake case, camel case and pascal case in Python. Inspired by Humps for Node" -optional = false +optional = true python-versions = "*" files = [ {file = "pyhumps-3.8.0-py3-none-any.whl", hash = "sha256:060e1954d9069f428232a1adda165db0b9d8dfdce1d265d36df7fbff540acfd6"}, @@ -3355,6 +4082,27 @@ docs = ["myst_parser", "sphinx", "sphinx_rtd_theme"] full = ["Pillow", "PyCryptodome"] image = ["Pillow"] +[[package]] +name = "pypika" +version = "0.48.9" +description = "A SQL query builder API for Python" +optional = false +python-versions = "*" +files = [ + {file = "PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378"}, +] + +[[package]] +name = "pyreadline3" +version = "3.4.1" +description = "A python implementation of GNU readline." 
+optional = false +python-versions = "*" +files = [ + {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, + {file = "pyreadline3-3.4.1.tar.gz", hash = "sha256:6f3d1f7b8a31ba32b73917cefc1f28cc660562f39aea8646d30bd6eff21f7bae"}, +] + [[package]] name = "pysocks" version = "1.7.1" @@ -3573,7 +4321,7 @@ dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatc name = "pytz" version = "2023.3.post1" description = "World timezone definitions, modern and historical" -optional = false +optional = true python-versions = "*" files = [ {file = "pytz-2023.3.post1-py2.py3-none-any.whl", hash = "sha256:ce42d816b81b68506614c11e8937d3aa9e41007ceb50bfdcb0749b921bf646c7"}, @@ -3584,7 +4332,7 @@ files = [ name = "pyvis" version = "0.3.2" description = "A Python network graph visualization library" -optional = false +optional = true python-versions = ">3.6" files = [ {file = "pyvis-0.3.2-py3-none-any.whl", hash = "sha256:5720c4ca8161dc5d9ab352015723abb7a8bb8fb443edeb07f7a322db34a97555"}, @@ -4007,7 +4755,7 @@ testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jar name = "setuptools-scm" version = "8.0.4" description = "the blessed package to manage your versions by scm tags" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "setuptools-scm-8.0.4.tar.gz", hash = "sha256:b5f43ff6800669595193fd09891564ee9d1d7dcb196cab4b2506d53a2e1c95c7"}, @@ -4231,6 +4979,92 @@ files = [ {file = "spacy_loggers-1.0.5-py3-none-any.whl", hash = "sha256:196284c9c446cc0cdb944005384270d775fdeaf4f494d8e269466cfa497ef645"}, ] +[[package]] +name = "sqlalchemy" +version = "2.0.22" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f146c61ae128ab43ea3a0955de1af7e1633942c2b2b4985ac51cc292daf33222"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:875de9414393e778b655a3d97d60465eb3fae7c919e88b70cc10b40b9f56042d"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13790cb42f917c45c9c850b39b9941539ca8ee7917dacf099cc0b569f3d40da7"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e04ab55cf49daf1aeb8c622c54d23fa4bec91cb051a43cc24351ba97e1dd09f5"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a42c9fa3abcda0dcfad053e49c4f752eef71ecd8c155221e18b99d4224621176"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:14cd3bcbb853379fef2cd01e7c64a5d6f1d005406d877ed9509afb7a05ff40a5"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-win32.whl", hash = "sha256:d143c5a9dada696bcfdb96ba2de4a47d5a89168e71d05a076e88a01386872f97"}, + {file = "SQLAlchemy-2.0.22-cp310-cp310-win_amd64.whl", hash = "sha256:ccd87c25e4c8559e1b918d46b4fa90b37f459c9b4566f1dfbce0eb8122571547"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4f6ff392b27a743c1ad346d215655503cec64405d3b694228b3454878bf21590"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f776c2c30f0e5f4db45c3ee11a5f2a8d9de68e81eb73ec4237de1e32e04ae81c"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8f1792d20d2f4e875ce7a113f43c3561ad12b34ff796b84002a256f37ce9437"}, + {file = 
"SQLAlchemy-2.0.22-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d80eeb5189d7d4b1af519fc3f148fe7521b9dfce8f4d6a0820e8f5769b005051"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:69fd9e41cf9368afa034e1c81f3570afb96f30fcd2eb1ef29cb4d9371c6eece2"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:54bcceaf4eebef07dadfde424f5c26b491e4a64e61761dea9459103ecd6ccc95"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-win32.whl", hash = "sha256:7ee7ccf47aa503033b6afd57efbac6b9e05180f492aeed9fcf70752556f95624"}, + {file = "SQLAlchemy-2.0.22-cp311-cp311-win_amd64.whl", hash = "sha256:b560f075c151900587ade06706b0c51d04b3277c111151997ea0813455378ae0"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:2c9bac865ee06d27a1533471405ad240a6f5d83195eca481f9fc4a71d8b87df8"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:625b72d77ac8ac23da3b1622e2da88c4aedaee14df47c8432bf8f6495e655de2"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39a6e21110204a8c08d40ff56a73ba542ec60bab701c36ce721e7990df49fb9"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53a766cb0b468223cafdf63e2d37f14a4757476157927b09300c8c5832d88560"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0e1ce8ebd2e040357dde01a3fb7d30d9b5736b3e54a94002641dfd0aa12ae6ce"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:505f503763a767556fa4deae5194b2be056b64ecca72ac65224381a0acab7ebe"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-win32.whl", hash = "sha256:154a32f3c7b00de3d090bc60ec8006a78149e221f1182e3edcf0376016be9396"}, + {file = "SQLAlchemy-2.0.22-cp312-cp312-win_amd64.whl", hash = "sha256:129415f89744b05741c6f0b04a84525f37fbabe5dc3774f7edf100e7458c48cd"}, + {file = "SQLAlchemy-2.0.22-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3940677d341f2b685a999bffe7078697b5848a40b5f6952794ffcf3af150c301"}, + {file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:55914d45a631b81a8a2cb1a54f03eea265cf1783241ac55396ec6d735be14883"}, + {file = "SQLAlchemy-2.0.22-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2096d6b018d242a2bcc9e451618166f860bb0304f590d205173d317b69986c95"}, + {file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:19c6986cf2fb4bc8e0e846f97f4135a8e753b57d2aaaa87c50f9acbe606bd1db"}, + {file = "SQLAlchemy-2.0.22-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6ac28bd6888fe3c81fbe97584eb0b96804bd7032d6100b9701255d9441373ec1"}, + {file = "SQLAlchemy-2.0.22-cp37-cp37m-win32.whl", hash = "sha256:cb9a758ad973e795267da334a92dd82bb7555cb36a0960dcabcf724d26299db8"}, + {file = "SQLAlchemy-2.0.22-cp37-cp37m-win_amd64.whl", hash = "sha256:40b1206a0d923e73aa54f0a6bd61419a96b914f1cd19900b6c8226899d9742ad"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3aa1472bf44f61dd27987cd051f1c893b7d3b17238bff8c23fceaef4f1133868"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:56a7e2bb639df9263bf6418231bc2a92a773f57886d371ddb7a869a24919face"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ccca778c0737a773a1ad86b68bda52a71ad5950b25e120b6eb1330f0df54c3d0"}, + {file = 
"SQLAlchemy-2.0.22-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c6c3e9350f9fb16de5b5e5fbf17b578811a52d71bb784cc5ff71acb7de2a7f9"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:564e9f9e4e6466273dbfab0e0a2e5fe819eec480c57b53a2cdee8e4fdae3ad5f"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:af66001d7b76a3fab0d5e4c1ec9339ac45748bc4a399cbc2baa48c1980d3c1f4"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-win32.whl", hash = "sha256:9e55dff5ec115316dd7a083cdc1a52de63693695aecf72bc53a8e1468ce429e5"}, + {file = "SQLAlchemy-2.0.22-cp38-cp38-win_amd64.whl", hash = "sha256:4e869a8ff7ee7a833b74868a0887e8462445ec462432d8cbeff5e85f475186da"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9886a72c8e6371280cb247c5d32c9c8fa141dc560124348762db8a8b236f8692"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a571bc8ac092a3175a1d994794a8e7a1f2f651e7c744de24a19b4f740fe95034"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8db5ba8b7da759b727faebc4289a9e6a51edadc7fc32207a30f7c6203a181592"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b0b3f2686c3f162123adba3cb8b626ed7e9b8433ab528e36ed270b4f70d1cdb"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0c1fea8c0abcb070ffe15311853abfda4e55bf7dc1d4889497b3403629f3bf00"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:4bb062784f37b2d75fd9b074c8ec360ad5df71f933f927e9e95c50eb8e05323c"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-win32.whl", hash = "sha256:58a3aba1bfb32ae7af68da3f277ed91d9f57620cf7ce651db96636790a78b736"}, + {file = "SQLAlchemy-2.0.22-cp39-cp39-win_amd64.whl", hash = "sha256:92e512a6af769e4725fa5b25981ba790335d42c5977e94ded07db7d641490a85"}, + {file = "SQLAlchemy-2.0.22-py3-none-any.whl", hash = "sha256:3076740335e4aaadd7deb3fe6dcb96b3015f1613bd190a4e1634e1b99b02ec86"}, + {file = "SQLAlchemy-2.0.22.tar.gz", hash = "sha256:5434cc601aa17570d79e5377f5fd45ff92f9379e2abed0be5e8c2fba8d353d2b"}, +] + +[package.dependencies] +greenlet = {version = "!=0.4.17", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.2.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (!=0.4.17)"] +aiosqlite = ["aiosqlite", "greenlet (!=0.4.17)", "typing-extensions (!=3.10.0.1)"] +asyncio = ["greenlet (!=0.4.17)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (!=0.4.17)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx-oracle (>=7)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (!=0.4.17)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3-binary"] + [[package]] name = "srsly" version = "2.4.8" @@ 
-4281,7 +5115,7 @@ catalogue = ">=2.0.3,<2.1.0" name = "stack-data" version = "0.6.3" description = "Extract data from python stack frames and tracebacks for informative displays" -optional = false +optional = true python-versions = "*" files = [ {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, @@ -4313,6 +5147,34 @@ anyio = ">=3.4.0,<5" [package.extras] full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] +[[package]] +name = "sympy" +version = "1.12" +description = "Computer algebra system (CAS) in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, + {file = "sympy-1.12.tar.gz", hash = "sha256:ebf595c8dac3e0fdc4152c51878b498396ec7f30e7a914d6071e674d49420fb8"}, +] + +[package.dependencies] +mpmath = ">=0.19" + +[[package]] +name = "tenacity" +version = "8.2.3" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tenacity-8.2.3-py3-none-any.whl", hash = "sha256:ce510e327a630c9e1beaf17d42e6ffacc88185044ad85cf74c0a8887c6a0f88c"}, + {file = "tenacity-8.2.3.tar.gz", hash = "sha256:5398ef0d78e63f40007c1fb4c0bff96e1911394d2fa8d194f77619c05ff6cc8a"}, +] + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + [[package]] name = "thinc" version = "8.1.12" @@ -4433,6 +5295,121 @@ requests = ">=2.26.0" [package.extras] blobfile = ["blobfile (>=2)"] +[[package]] +name = "tokenizers" +version = "0.14.1" +description = "" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tokenizers-0.14.1-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:04ec1134a18ede355a05641cdc7700f17280e01f69f2f315769f02f7e295cf1e"}, + {file = "tokenizers-0.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:638abedb39375f0ddce2de536fc9c976639b2d1b7202d715c2e7a25f0ebfd091"}, + {file = "tokenizers-0.14.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:901635098565773a44f74068639d265f19deaaca47ea77b428fd9bee13a61d87"}, + {file = "tokenizers-0.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72e95184bf5b9a4c08153ed07c16c130ff174835c9a1e6ee2b311be758c8b3ef"}, + {file = "tokenizers-0.14.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ebefbc26ccff5e96ae7d40772172e7310174f9aa3683d2870a1882313ec3a4d5"}, + {file = "tokenizers-0.14.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d3a6330c9f1deda22873e8b4ac849cc06d3ff33d60b3217ac0bb397b541e1509"}, + {file = "tokenizers-0.14.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cba7483ba45600346a35c466bde32327b108575022f73c35a0f7170b5a71ae2"}, + {file = "tokenizers-0.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60fec380778d75cbb492f14ca974f11f37b41d53c057b9c8ba213315b86e1f84"}, + {file = "tokenizers-0.14.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:930c19b699dd7e1077eac98967adc2fe5f0b104bd96cc1f26778ab82b31ceb24"}, + {file = "tokenizers-0.14.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a1e30a13376db5329570e09b14c8eb36c017909ed7e88591ca3aa81f3c7d6f32"}, + {file = "tokenizers-0.14.1-cp310-none-win32.whl", hash = "sha256:370b5b86da9bddbe65fa08711f0e8ffdf8b0036558178d1a31dfcb44efcde72a"}, + {file = "tokenizers-0.14.1-cp310-none-win_amd64.whl", hash = 
"sha256:c2c659f2106b6d154f118ad1b700e68148c46c59b720f04867b1fc5f26a85060"}, + {file = "tokenizers-0.14.1-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:00df4c5bf25c153b432b98689609b426ae701a44f3d8074dcb619f410bc2a870"}, + {file = "tokenizers-0.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fee553657dcdb7e73df8823c49e8611457ba46e9d7026b7e9c44820c08c327c3"}, + {file = "tokenizers-0.14.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a480bd902e327dfcaa52b7dd14fdc71e7aa45d73a3d6e41e028a75891d2823cf"}, + {file = "tokenizers-0.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e448b2be0430ab839cf7954715c39d6f34ff6cf2b49393f336283b7a59f485af"}, + {file = "tokenizers-0.14.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c11444984aecd342f0cf160c3320288edeb1763871fbb560ed466654b2a7016c"}, + {file = "tokenizers-0.14.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe164a1c72c6be3c5c26753c6c412f81412f4dae0d7d06371e0b396a9cc0fc9"}, + {file = "tokenizers-0.14.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:72d9967fb1f927542cfb5347207fde01b29f25c9bb8cbc7ced280decfa015983"}, + {file = "tokenizers-0.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37cc955c84ec67c2d11183d372044399342b20a1fa447b7a33040f4889bba318"}, + {file = "tokenizers-0.14.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:db96cf092d86d4cb543daa9148e299011e0a40770380bb78333b9fd700586fcb"}, + {file = "tokenizers-0.14.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c84d3cb1349936c2b96ca6175b50f5a9518170bffd76464219ee0ea6022a64a7"}, + {file = "tokenizers-0.14.1-cp311-none-win32.whl", hash = "sha256:8db3a6f3d430ac3dc3793c53fa8e5e665c23ba359484d365a191027ad8b65a30"}, + {file = "tokenizers-0.14.1-cp311-none-win_amd64.whl", hash = "sha256:c65d76052561c60e17cb4fa289885ed00a9995d59e97019fac2138bd45142057"}, + {file = "tokenizers-0.14.1-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:c375161b588982be381c43eb7158c250f430793d0f708ce379a0f196164c6778"}, + {file = "tokenizers-0.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:50f03d2330a153a9114c2429061137bd323736059f384de8348d7cb1ca1baa15"}, + {file = "tokenizers-0.14.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0c8ee283b249c3c3c201c41bc23adc3be2514ae4121eacdb5c5250a461eaa8c6"}, + {file = "tokenizers-0.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9f27399b8d50c5d3f08f0aae961bcc66a1dead1cd0ae9401e4c2a43a623322a"}, + {file = "tokenizers-0.14.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:89cbeec7e9d5d8773ec4779c64e3cbcbff53d234ca6ad7b1a3736588003bba48"}, + {file = "tokenizers-0.14.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:08e55920b453c30b46d58accc68a38e8e7488d0c03babfdb29c55d3f39dd2052"}, + {file = "tokenizers-0.14.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91d32bd1056c0e83a0f90e4ffa213c25096b2d8b9f0e2d172a45f138c7d8c081"}, + {file = "tokenizers-0.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44f1748035c36c939848c935715bde41734d9249ab7b844ff9bfbe984be8952c"}, + {file = "tokenizers-0.14.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1ff516d129f01bb7a4aa95bc6aae88e4d86dd63bfc2d57db9302c2624d1be7cb"}, + {file = "tokenizers-0.14.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:acfc8db61c6e919d932448cc7985b85e330c8d745528e12fce6e62d40d268bce"}, + {file = "tokenizers-0.14.1-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:ba336bc9107acbc1da2ad30967df7b2db93448ca66538ad86aa1fbb91116f631"}, + {file = "tokenizers-0.14.1-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:f77371b5030e53f8bf92197640af437539e3bba1bc8342b97888c8e26567bfdc"}, + {file = "tokenizers-0.14.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d72d25c57a9c814240802d188ff0a808b701e2dd2bf1c64721c7088ceeeb1ed7"}, + {file = "tokenizers-0.14.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:caf0df8657277e32671aa8a4d3cc05f2050ab19d9b49447f2265304168e9032c"}, + {file = "tokenizers-0.14.1-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cb3c6bc6e599e46a26ad559ad5dec260ffdf705663cc9b894033d64a69314e86"}, + {file = "tokenizers-0.14.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8cf2fcdc2368df4317e05571e33810eeed24cd594acc9dfc9788b21dac6b3a8"}, + {file = "tokenizers-0.14.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f475d5eda41d2ed51ca775a07c80529a923dd759fcff7abf03ccdd83d9f7564e"}, + {file = "tokenizers-0.14.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cce4d1a97a7eb2253b5d3f29f4a478d8c37ba0303ea34024eb9e65506d4209f8"}, + {file = "tokenizers-0.14.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ff66577ae55114f7d0f6aa0d4d335f27cae96bf245962a745b718ec887bbe7eb"}, + {file = "tokenizers-0.14.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a687099e085f5162e5b88b3402adb6c2b41046180c015c5075c9504440b6e971"}, + {file = "tokenizers-0.14.1-cp37-none-win32.whl", hash = "sha256:49f5336b82e315a33bef1025d247ca08d95719715b29e33f0e9e8cf15ff1dfb6"}, + {file = "tokenizers-0.14.1-cp37-none-win_amd64.whl", hash = "sha256:117c8da60d1bd95a6df2692926f36de7971baa1d89ff702fae47b6689a4465ad"}, + {file = "tokenizers-0.14.1-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:01d2bd5935642de22a6c6778bb2307f9949cd6eaeeb5c77f9b98f0060b69f0db"}, + {file = "tokenizers-0.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b05ec04132394c20bd6bcb692d557a8eb8ab1bac1646d28e49c67c00907d17c8"}, + {file = "tokenizers-0.14.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7d9025b185465d9d18679406f6f394850347d5ed2681efc203539d800f36f459"}, + {file = "tokenizers-0.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2539831838ab5393f78a893d7bbf27d5c36e43baf77e91dc9992922b2b97e09d"}, + {file = "tokenizers-0.14.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ec8f46d533092d8e20bc742c47918cbe24b8641dbfbbcb83177c5de3c9d4decb"}, + {file = "tokenizers-0.14.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b019c4810903fdea3b230f358b9d27377c0f38454778b607676c9e1b57d14b7"}, + {file = "tokenizers-0.14.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e8984114fd83ed3913d89526c992395920930c9620a2feee61faf035f41d7b9a"}, + {file = "tokenizers-0.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11284b32f0036fe7ef4b8b00201dda79c00f3fcea173bc0e5c599e09c937ab0f"}, + {file = "tokenizers-0.14.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:53614f44f36917282a583180e402105bc63d61d1aca067d51cb7f051eb489901"}, + {file = "tokenizers-0.14.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:e3b6082e9532309727273443c8943bb9558d52e36788b246aa278bda7c642116"}, + {file = "tokenizers-0.14.1-cp38-none-win32.whl", hash = "sha256:7560fca3e17a6bc876d20cd825d7721c101fa2b1cd0bfa0abf9a2e781e49b37b"}, + {file = "tokenizers-0.14.1-cp38-none-win_amd64.whl", hash = "sha256:c318a5acb429ca38f632577754235140bbb8c5a27faca1c51b43fbf575596e34"}, + {file = "tokenizers-0.14.1-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:b886e0f5c72aa4249c609c24b9610a9ca83fd963cbb5066b19302723ea505279"}, + {file = "tokenizers-0.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f522f28c88a0d5b2f9e895cf405dd594cd518e99d61905406aec74d30eb6383b"}, + {file = "tokenizers-0.14.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5bef76c4d9329913cef2fe79ce1f4dab98f77fa4887e5f0420ffc9386941de32"}, + {file = "tokenizers-0.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59c7df2103052b30b7c76d4fa8251326c9f82689578a912698a127dc1737f43e"}, + {file = "tokenizers-0.14.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:232445e7b85255ccfe68dfd42185db8a3f3349b34ad7068404856c4a5f67c355"}, + {file = "tokenizers-0.14.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8e63781da85aa8948864970e529af10abc4084a990d30850c41bbdb5f83eee45"}, + {file = "tokenizers-0.14.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5760a831c0f3c6d3229b50ef3fafa4c164ec99d7e8c2237fe144e67a9d33b120"}, + {file = "tokenizers-0.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c84b456ff8525ec3ff09762e32ccc27888d036dcd0ba2883e1db491e164dd725"}, + {file = "tokenizers-0.14.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:463ee5f3afbfec29cbf5652752c9d1032bdad63daf48bb8cb9970064cc81d5f9"}, + {file = "tokenizers-0.14.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ee6b63aecf929a7bcf885bdc8a8aec96c43bc4442f63fe8c6d48f24fc992b05b"}, + {file = "tokenizers-0.14.1-cp39-none-win32.whl", hash = "sha256:aae42798ba1da3bc1572b2048fe42e61dd6bacced2b424cb0f5572c5432f79c2"}, + {file = "tokenizers-0.14.1-cp39-none-win_amd64.whl", hash = "sha256:68c4699147dded6926a3d2c2f948d435d54d027f69909e0ef3c6587933723ed2"}, + {file = "tokenizers-0.14.1-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:5f9afdcf701a1aa3c41e0e748c152d2162434d61639a1e5d8523ecf60ae35aea"}, + {file = "tokenizers-0.14.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:6859d81243cd09854be9054aca3ecab14a2dee5b3c9f6d7ef12061d478ca0c57"}, + {file = "tokenizers-0.14.1-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:7975178f9478ccedcf613332d5d6f37b67c74ef4e2e47e0c965597506b921f04"}, + {file = "tokenizers-0.14.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ce2f0ff2e5f12ac5bebaa690606395725239265d7ffa35f35c243a379316297"}, + {file = "tokenizers-0.14.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c7cfc3d42e81cda802f93aa9e92caf79feaa1711426e28ce620560b8aaf5e4d"}, + {file = "tokenizers-0.14.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:67d3adff654dc7f7c7091dd259b3b847fe119c08d0bda61db91e2ea2b61c38c0"}, + {file = "tokenizers-0.14.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:956729b7dd599020e57133fb95b777e4f81ee069ff0a70e80f6eeac82658972f"}, + {file = "tokenizers-0.14.1-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = 
"sha256:fe2ea1177146a7ab345ab61e90a490eeea25d5f063e1cb9d4eb1425b169b64d7"}, + {file = "tokenizers-0.14.1-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9930f31f603ecc6ea54d5c6dfa299f926ab3e921f72f94babcb02598c32b57c6"}, + {file = "tokenizers-0.14.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d49567a2754e9991c05c2b5a7e6650b56e24365b7cab504558e58033dcf0edc4"}, + {file = "tokenizers-0.14.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3678be5db330726f19c1949d8ae1b845a02eeb2a2e1d5a8bb8eaa82087ae25c1"}, + {file = "tokenizers-0.14.1-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:42b180ed1bec58ab9bdc65d406577e0c0fb7241b74b8c032846073c7743c9f86"}, + {file = "tokenizers-0.14.1-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:319e4367596fb0d52be645b3de1616faf0fadaf28507ce1c7595bebd9b4c402c"}, + {file = "tokenizers-0.14.1-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:2cda65b689aec63b7c76a77f43a08044fa90bbc6ad9849267cedfee9795913f3"}, + {file = "tokenizers-0.14.1-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:ca0bfc79b27d84fcb7fa09339b2ee39077896738d9a30ff99c0332376e985072"}, + {file = "tokenizers-0.14.1-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a7093767e070269e22e2c5f845e46510304f124c32d2cd249633c0f27eb29d86"}, + {file = "tokenizers-0.14.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ad759ba39cd32c2c2247864d02c84ea5883b5f6cc6a4ee0c95602a3dde52268f"}, + {file = "tokenizers-0.14.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26fee36a6d8f2bd9464f3566b95e3e3fb7fd7dad723f775c500aac8204ec98c6"}, + {file = "tokenizers-0.14.1-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d091c62cb7abbd32e527a85c41f7c8eb4526a926251891fc4ecbe5f974142ffb"}, + {file = "tokenizers-0.14.1-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ca304402ea66d58f99c05aa3d7a6052faea61e5a8313b94f6bc36fbf27960e2d"}, + {file = "tokenizers-0.14.1-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:102f118fa9b720b93c3217c1e239ed7bc1ae1e8dbfe9b4983a4f2d7b4ce6f2ec"}, + {file = "tokenizers-0.14.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:df4f058e96e8b467b7742e5dba7564255cd482d3c1e6cf81f8cb683bb0433340"}, + {file = "tokenizers-0.14.1-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:040ee44efc1806900de72b13c1c3036154077d9cde189c9a7e7a50bbbdcbf39f"}, + {file = "tokenizers-0.14.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7618b84118ae704f7fa23c4a190bd80fc605671841a4427d5ca14b9b8d9ec1a3"}, + {file = "tokenizers-0.14.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ecdfe9736c4a73343f629586016a137a10faed1a29c6dc699d8ab20c2d3cf64"}, + {file = "tokenizers-0.14.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:92c34de04fec7f4ff95f7667d4eb085c4e4db46c31ef44c3d35c38df128430da"}, + {file = "tokenizers-0.14.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:628b654ba555b2ba9111c0936d558b14bfc9d5f57b8c323b02fc846036b38b2f"}, + {file = "tokenizers-0.14.1.tar.gz", hash = "sha256:ea3b3f8908a9a5b9d6fc632b5f012ece7240031c44c6d4764809f33736534166"}, +] + +[package.dependencies] +huggingface_hub = ">=0.16.4,<0.18" + +[package.extras] +dev = ["tokenizers[testing]"] +docs = ["setuptools_rust", "sphinx", "sphinx_rtd_theme"] +testing = ["black (==22.3)", 
"datasets", "numpy", "pytest", "requests"] + [[package]] name = "toml" version = "0.10.2" @@ -4479,7 +5456,7 @@ telegram = ["requests"] name = "traitlets" version = "5.11.2" description = "Traitlets Python configuration system" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "traitlets-5.11.2-py3-none-any.whl", hash = "sha256:98277f247f18b2c5cabaf4af369187754f4fb0e85911d473f72329db8a7f4fae"}, @@ -4609,7 +5586,7 @@ files = [ name = "types-requests" version = "2.31.0.9" description = "Typing stubs for requests" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "types-requests-2.31.0.9.tar.gz", hash = "sha256:3bb11188795cc3aa39f9635032044ee771009370fb31c3a06ae952b267b6fcd7"}, @@ -4634,7 +5611,7 @@ files = [ name = "tzdata" version = "2023.3" description = "Provider of IANA time zone data" -optional = false +optional = true python-versions = ">=2" files = [ {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, @@ -4654,13 +5631,13 @@ files = [ [[package]] name = "urllib3" -version = "2.0.6" +version = "2.0.7" description = "HTTP library with thread-safe connection pooling, file post, and more." optional = false python-versions = ">=3.7" files = [ - {file = "urllib3-2.0.6-py3-none-any.whl", hash = "sha256:7a7c7003b000adf9e7ca2a377c9688bbc54ed41b985789ed576570342a375cd2"}, - {file = "urllib3-2.0.6.tar.gz", hash = "sha256:b19e1a85d206b56d7df1d5e683df4a7725252a964e3993648dd0fb5a1c157564"}, + {file = "urllib3-2.0.7-py3-none-any.whl", hash = "sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e"}, + {file = "urllib3-2.0.7.tar.gz", hash = "sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84"}, ] [package.dependencies] @@ -4685,12 +5662,68 @@ files = [ [package.dependencies] click = ">=7.0" +colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""} h11 = ">=0.8" +httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""} +python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""} typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} +uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""} +watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""} +websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""} [package.extras] standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"] +[[package]] +name = "uvloop" +version = "0.18.0" +description = "Fast implementation of asyncio event loop on top of libuv" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "uvloop-0.18.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1f354d669586fca96a9a688c585b6257706d216177ac457c92e15709acaece10"}, + {file = "uvloop-0.18.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:280904236a5b333a273292b3bcdcbfe173690f69901365b973fa35be302d7781"}, + {file = "uvloop-0.18.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ad79cd30c7e7484bdf6e315f3296f564b3ee2f453134a23ffc80d00e63b3b59e"}, + {file = "uvloop-0.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99deae0504547d04990cc5acf631d9f490108c3709479d90c1dcd14d6e7af24d"}, + {file = "uvloop-0.18.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:edbb4de38535f42f020da1e3ae7c60f2f65402d027a08a8c60dc8569464873a6"}, + {file = "uvloop-0.18.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:54b211c46facb466726b227f350792770fc96593c4ecdfaafe20dc00f3209aef"}, + {file = "uvloop-0.18.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:25b714f07c68dcdaad6994414f6ec0f2a3b9565524fba181dcbfd7d9598a3e73"}, + {file = "uvloop-0.18.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1121087dfeb46e9e65920b20d1f46322ba299b8d93f7cb61d76c94b5a1adc20c"}, + {file = "uvloop-0.18.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74020ef8061678e01a40c49f1716b4f4d1cc71190d40633f08a5ef8a7448a5c6"}, + {file = "uvloop-0.18.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f4a549cd747e6f4f8446f4b4c8cb79504a8372d5d3a9b4fc20e25daf8e76c05"}, + {file = "uvloop-0.18.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6132318e1ab84a626639b252137aa8d031a6c0550250460644c32ed997604088"}, + {file = "uvloop-0.18.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:585b7281f9ea25c4a5fa993b1acca4ad3d8bc3f3fe2e393f0ef51b6c1bcd2fe6"}, + {file = "uvloop-0.18.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:61151cc207cf5fc88863e50de3d04f64ee0fdbb979d0b97caf21cae29130ed78"}, + {file = "uvloop-0.18.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c65585ae03571b73907b8089473419d8c0aff1e3826b3bce153776de56cbc687"}, + {file = "uvloop-0.18.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3d301e23984dcbc92d0e42253e0e0571915f0763f1eeaf68631348745f2dccc"}, + {file = "uvloop-0.18.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:680da98f12a7587f76f6f639a8aa7708936a5d17c5e7db0bf9c9d9cbcb616593"}, + {file = "uvloop-0.18.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:75baba0bfdd385c886804970ae03f0172e0d51e51ebd191e4df09b929771b71e"}, + {file = "uvloop-0.18.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ed3c28337d2fefc0bac5705b9c66b2702dc392f2e9a69badb1d606e7e7f773bb"}, + {file = "uvloop-0.18.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8849b8ef861431543c07112ad8436903e243cdfa783290cbee3df4ce86d8dd48"}, + {file = "uvloop-0.18.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:211ce38d84118ae282a91408f61b85cf28e2e65a0a8966b9a97e0e9d67c48722"}, + {file = "uvloop-0.18.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0a8f706b943c198dcedf1f2fb84899002c195c24745e47eeb8f2fb340f7dfc3"}, + {file = "uvloop-0.18.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:58e44650cbc8607a218caeece5a689f0a2d10be084a69fc32f7db2e8f364927c"}, + {file = "uvloop-0.18.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2b8b7cf7806bdc745917f84d833f2144fabcc38e9cd854e6bc49755e3af2b53e"}, + {file = "uvloop-0.18.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:56c1026a6b0d12b378425e16250acb7d453abaefe7a2f5977143898db6cfe5bd"}, + {file = "uvloop-0.18.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:12af0d2e1b16780051d27c12de7e419b9daeb3516c503ab3e98d364cc55303bb"}, + {file = "uvloop-0.18.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash 
= "sha256:b028776faf9b7a6d0a325664f899e4c670b2ae430265189eb8d76bd4a57d8a6e"}, + {file = "uvloop-0.18.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53aca21735eee3859e8c11265445925911ffe410974f13304edb0447f9f58420"}, + {file = "uvloop-0.18.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:847f2ed0887047c63da9ad788d54755579fa23f0784db7e752c7cf14cf2e7506"}, + {file = "uvloop-0.18.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6e20bb765fcac07879cd6767b6dca58127ba5a456149717e0e3b1f00d8eab51c"}, + {file = "uvloop-0.18.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e14de8800765b9916d051707f62e18a304cde661fa2b98a58816ca38d2b94029"}, + {file = "uvloop-0.18.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f3b18663efe0012bc4c315f1b64020e44596f5fabc281f5b0d9bc9465288559c"}, + {file = "uvloop-0.18.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6d341bc109fb8ea69025b3ec281fcb155d6824a8ebf5486c989ff7748351a37"}, + {file = "uvloop-0.18.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:895a1e3aca2504638a802d0bec2759acc2f43a0291a1dff886d69f8b7baff399"}, + {file = "uvloop-0.18.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d90858f32a852988d33987d608bcfba92a1874eb9f183995def59a34229f30d"}, + {file = "uvloop-0.18.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:db1fcbad5deb9551e011ca589c5e7258b5afa78598174ac37a5f15ddcfb4ac7b"}, + {file = "uvloop-0.18.0.tar.gz", hash = "sha256:d5d1135beffe9cd95d0350f19e2716bc38be47d5df296d7cc46e3b7557c0d1ff"}, +] + +[package.extras] +docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"] +test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"] + [[package]] name = "vcrpy" version = "5.1.0" @@ -4745,6 +5778,93 @@ files = [ [package.dependencies] colorama = {version = ">=0.4.6", markers = "sys_platform == \"win32\" and python_version >= \"3.7\""} +[[package]] +name = "watchfiles" +version = "0.21.0" +description = "Simple, modern and high performance file watching and code reload in python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "watchfiles-0.21.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:27b4035013f1ea49c6c0b42d983133b136637a527e48c132d368eb19bf1ac6aa"}, + {file = "watchfiles-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c81818595eff6e92535ff32825f31c116f867f64ff8cdf6562cd1d6b2e1e8f3e"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c107ea3cf2bd07199d66f156e3ea756d1b84dfd43b542b2d870b77868c98c03"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d9ac347653ebd95839a7c607608703b20bc07e577e870d824fa4801bc1cb124"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5eb86c6acb498208e7663ca22dbe68ca2cf42ab5bf1c776670a50919a56e64ab"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f564bf68404144ea6b87a78a3f910cc8de216c6b12a4cf0b27718bf4ec38d303"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d0f32ebfaa9c6011f8454994f86108c2eb9c79b8b7de00b36d558cadcedaa3d"}, + {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d45d9b699ecbac6c7bd8e0a2609767491540403610962968d258fd6405c17c"}, + {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:aff06b2cac3ef4616e26ba17a9c250c1fe9dd8a5d907d0193f84c499b1b6e6a9"}, + {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d9792dff410f266051025ecfaa927078b94cc7478954b06796a9756ccc7e14a9"}, + {file = "watchfiles-0.21.0-cp310-none-win32.whl", hash = "sha256:214cee7f9e09150d4fb42e24919a1e74d8c9b8a9306ed1474ecaddcd5479c293"}, + {file = "watchfiles-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:1ad7247d79f9f55bb25ab1778fd47f32d70cf36053941f07de0b7c4e96b5d235"}, + {file = "watchfiles-0.21.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:668c265d90de8ae914f860d3eeb164534ba2e836811f91fecc7050416ee70aa7"}, + {file = "watchfiles-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a23092a992e61c3a6a70f350a56db7197242f3490da9c87b500f389b2d01eef"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e7941bbcfdded9c26b0bf720cb7e6fd803d95a55d2c14b4bd1f6a2772230c586"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11cd0c3100e2233e9c53106265da31d574355c288e15259c0d40a4405cbae317"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78f30cbe8b2ce770160d3c08cff01b2ae9306fe66ce899b73f0409dc1846c1b"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6674b00b9756b0af620aa2a3346b01f8e2a3dc729d25617e1b89cf6af4a54eb1"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd7ac678b92b29ba630d8c842d8ad6c555abda1b9ef044d6cc092dacbfc9719d"}, + {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c873345680c1b87f1e09e0eaf8cf6c891b9851d8b4d3645e7efe2ec20a20cc7"}, + {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:49f56e6ecc2503e7dbe233fa328b2be1a7797d31548e7a193237dcdf1ad0eee0"}, + {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:02d91cbac553a3ad141db016e3350b03184deaafeba09b9d6439826ee594b365"}, + {file = "watchfiles-0.21.0-cp311-none-win32.whl", hash = "sha256:ebe684d7d26239e23d102a2bad2a358dedf18e462e8808778703427d1f584400"}, + {file = "watchfiles-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:4566006aa44cb0d21b8ab53baf4b9c667a0ed23efe4aaad8c227bfba0bf15cbe"}, + {file = "watchfiles-0.21.0-cp311-none-win_arm64.whl", hash = "sha256:c550a56bf209a3d987d5a975cdf2063b3389a5d16caf29db4bdddeae49f22078"}, + {file = "watchfiles-0.21.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:51ddac60b96a42c15d24fbdc7a4bfcd02b5a29c047b7f8bf63d3f6f5a860949a"}, + {file = "watchfiles-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:511f0b034120cd1989932bf1e9081aa9fb00f1f949fbd2d9cab6264916ae89b1"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cfb92d49dbb95ec7a07511bc9efb0faff8fe24ef3805662b8d6808ba8409a71a"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f92944efc564867bbf841c823c8b71bb0be75e06b8ce45c084b46411475a915"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:642d66b75eda909fd1112d35c53816d59789a4b38c141a96d62f50a3ef9b3360"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d23bcd6c8eaa6324fe109d8cac01b41fe9a54b8c498af9ce464c1aeeb99903d6"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18d5b4da8cf3e41895b34e8c37d13c9ed294954907929aacd95153508d5d89d7"}, + {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b8d1eae0f65441963d805f766c7e9cd092f91e0c600c820c764a4ff71a0764c"}, + {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1fd9a5205139f3c6bb60d11f6072e0552f0a20b712c85f43d42342d162be1235"}, + {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a1e3014a625bcf107fbf38eece0e47fa0190e52e45dc6eee5a8265ddc6dc5ea7"}, + {file = "watchfiles-0.21.0-cp312-none-win32.whl", hash = "sha256:9d09869f2c5a6f2d9df50ce3064b3391d3ecb6dced708ad64467b9e4f2c9bef3"}, + {file = "watchfiles-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:18722b50783b5e30a18a8a5db3006bab146d2b705c92eb9a94f78c72beb94094"}, + {file = "watchfiles-0.21.0-cp312-none-win_arm64.whl", hash = "sha256:a3b9bec9579a15fb3ca2d9878deae789df72f2b0fdaf90ad49ee389cad5edab6"}, + {file = "watchfiles-0.21.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:4ea10a29aa5de67de02256a28d1bf53d21322295cb00bd2d57fcd19b850ebd99"}, + {file = "watchfiles-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:40bca549fdc929b470dd1dbfcb47b3295cb46a6d2c90e50588b0a1b3bd98f429"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9b37a7ba223b2f26122c148bb8d09a9ff312afca998c48c725ff5a0a632145f7"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec8c8900dc5c83650a63dd48c4d1d245343f904c4b64b48798c67a3767d7e165"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ad3fe0a3567c2f0f629d800409cd528cb6251da12e81a1f765e5c5345fd0137"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d353c4cfda586db2a176ce42c88f2fc31ec25e50212650c89fdd0f560ee507b"}, + {file = 
"watchfiles-0.21.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83a696da8922314ff2aec02987eefb03784f473281d740bf9170181829133765"}, + {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a03651352fc20975ee2a707cd2d74a386cd303cc688f407296064ad1e6d1562"}, + {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ad692bc7792be8c32918c699638b660c0de078a6cbe464c46e1340dadb94c19"}, + {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06247538e8253975bdb328e7683f8515ff5ff041f43be6c40bff62d989b7d0b0"}, + {file = "watchfiles-0.21.0-cp38-none-win32.whl", hash = "sha256:9a0aa47f94ea9a0b39dd30850b0adf2e1cd32a8b4f9c7aa443d852aacf9ca214"}, + {file = "watchfiles-0.21.0-cp38-none-win_amd64.whl", hash = "sha256:8d5f400326840934e3507701f9f7269247f7c026d1b6cfd49477d2be0933cfca"}, + {file = "watchfiles-0.21.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7f762a1a85a12cc3484f77eee7be87b10f8c50b0b787bb02f4e357403cad0c0e"}, + {file = "watchfiles-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e9be3ef84e2bb9710f3f777accce25556f4a71e15d2b73223788d528fcc2052"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4c48a10d17571d1275701e14a601e36959ffada3add8cdbc9e5061a6e3579a5d"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c889025f59884423428c261f212e04d438de865beda0b1e1babab85ef4c0f01"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:66fac0c238ab9a2e72d026b5fb91cb902c146202bbd29a9a1a44e8db7b710b6f"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a21f71885aa2744719459951819e7bf5a906a6448a6b2bbce8e9cc9f2c8128"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c9198c989f47898b2c22201756f73249de3748e0fc9de44adaf54a8b259cc0c"}, + {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f57c4461cd24fda22493109c45b3980863c58a25b8bec885ca8bea6b8d4b28"}, + {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:853853cbf7bf9408b404754b92512ebe3e3a83587503d766d23e6bf83d092ee6"}, + {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d5b1dc0e708fad9f92c296ab2f948af403bf201db8fb2eb4c8179db143732e49"}, + {file = "watchfiles-0.21.0-cp39-none-win32.whl", hash = "sha256:59137c0c6826bd56c710d1d2bda81553b5e6b7c84d5a676747d80caf0409ad94"}, + {file = "watchfiles-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:6cb8fdc044909e2078c248986f2fc76f911f72b51ea4a4fbbf472e01d14faa58"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab03a90b305d2588e8352168e8c5a1520b721d2d367f31e9332c4235b30b8994"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:927c589500f9f41e370b0125c12ac9e7d3a2fd166b89e9ee2828b3dda20bfe6f"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd467213195e76f838caf2c28cd65e58302d0254e636e7c0fca81efa4a2e62c"}, + {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02b73130687bc3f6bb79d8a170959042eb56eb3a42df3671c79b428cd73f17cc"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = 
"sha256:08dca260e85ffae975448e344834d765983237ad6dc308231aa16e7933db763e"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ccceb50c611c433145502735e0370877cced72a6c70fd2410238bcbc7fe51d8"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57d430f5fb63fea141ab71ca9c064e80de3a20b427ca2febcbfcef70ff0ce895"}, + {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd5fad9b9c0dd89904bbdea978ce89a2b692a7ee8a0ce19b940e538c88a809c"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:be6dd5d52b73018b21adc1c5d28ac0c68184a64769052dfeb0c5d9998e7f56a2"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b3cab0e06143768499384a8a5efb9c4dc53e19382952859e4802f294214f36ec"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c6ed10c2497e5fedadf61e465b3ca12a19f96004c15dcffe4bd442ebadc2d85"}, + {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43babacef21c519bc6631c5fce2a61eccdfc011b4bcb9047255e9620732c8097"}, + {file = "watchfiles-0.21.0.tar.gz", hash = "sha256:c76c635fabf542bb78524905718c39f736a98e5ab25b23ec6d4abede1a85a6a3"}, +] + +[package.dependencies] +anyio = ">=3.0.0" + [[package]] name = "wcwidth" version = "0.2.8" @@ -4788,6 +5908,85 @@ docs = ["Sphinx (>=6.0)", "sphinx-rtd-theme (>=1.1.0)"] optional = ["python-socks", "wsaccel"] test = ["websockets"] +[[package]] +name = "websockets" +version = "11.0.3" +description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "websockets-11.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3ccc8a0c387629aec40f2fc9fdcb4b9d5431954f934da3eaf16cdc94f67dbfac"}, + {file = "websockets-11.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d67ac60a307f760c6e65dad586f556dde58e683fab03323221a4e530ead6f74d"}, + {file = "websockets-11.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:84d27a4832cc1a0ee07cdcf2b0629a8a72db73f4cf6de6f0904f6661227f256f"}, + {file = "websockets-11.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffd7dcaf744f25f82190856bc26ed81721508fc5cbf2a330751e135ff1283564"}, + {file = "websockets-11.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7622a89d696fc87af8e8d280d9b421db5133ef5b29d3f7a1ce9f1a7bf7fcfa11"}, + {file = "websockets-11.0.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bceab846bac555aff6427d060f2fcfff71042dba6f5fca7dc4f75cac815e57ca"}, + {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54c6e5b3d3a8936a4ab6870d46bdd6ec500ad62bde9e44462c32d18f1e9a8e54"}, + {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:41f696ba95cd92dc047e46b41b26dd24518384749ed0d99bea0a941ca87404c4"}, + {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86d2a77fd490ae3ff6fae1c6ceaecad063d3cc2320b44377efdde79880e11526"}, + {file = "websockets-11.0.3-cp310-cp310-win32.whl", hash = "sha256:2d903ad4419f5b472de90cd2d40384573b25da71e33519a67797de17ef849b69"}, + {file = "websockets-11.0.3-cp310-cp310-win_amd64.whl", hash = 
"sha256:1d2256283fa4b7f4c7d7d3e84dc2ece74d341bce57d5b9bf385df109c2a1a82f"}, + {file = "websockets-11.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e848f46a58b9fcf3d06061d17be388caf70ea5b8cc3466251963c8345e13f7eb"}, + {file = "websockets-11.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa5003845cdd21ac0dc6c9bf661c5beddd01116f6eb9eb3c8e272353d45b3288"}, + {file = "websockets-11.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b58cbf0697721120866820b89f93659abc31c1e876bf20d0b3d03cef14faf84d"}, + {file = "websockets-11.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:660e2d9068d2bedc0912af508f30bbeb505bbbf9774d98def45f68278cea20d3"}, + {file = "websockets-11.0.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1f0524f203e3bd35149f12157438f406eff2e4fb30f71221c8a5eceb3617b6b"}, + {file = "websockets-11.0.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:def07915168ac8f7853812cc593c71185a16216e9e4fa886358a17ed0fd9fcf6"}, + {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b30c6590146e53149f04e85a6e4fcae068df4289e31e4aee1fdf56a0dead8f97"}, + {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:619d9f06372b3a42bc29d0cd0354c9bb9fb39c2cbc1a9c5025b4538738dbffaf"}, + {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:01f5567d9cf6f502d655151645d4e8b72b453413d3819d2b6f1185abc23e82dd"}, + {file = "websockets-11.0.3-cp311-cp311-win32.whl", hash = "sha256:e1459677e5d12be8bbc7584c35b992eea142911a6236a3278b9b5ce3326f282c"}, + {file = "websockets-11.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:e7837cb169eca3b3ae94cc5787c4fed99eef74c0ab9506756eea335e0d6f3ed8"}, + {file = "websockets-11.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9f59a3c656fef341a99e3d63189852be7084c0e54b75734cde571182c087b152"}, + {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2529338a6ff0eb0b50c7be33dc3d0e456381157a31eefc561771ee431134a97f"}, + {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34fd59a4ac42dff6d4681d8843217137f6bc85ed29722f2f7222bd619d15e95b"}, + {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:332d126167ddddec94597c2365537baf9ff62dfcc9db4266f263d455f2f031cb"}, + {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6505c1b31274723ccaf5f515c1824a4ad2f0d191cec942666b3d0f3aa4cb4007"}, + {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f467ba0050b7de85016b43f5a22b46383ef004c4f672148a8abf32bc999a87f0"}, + {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9d9acd80072abcc98bd2c86c3c9cd4ac2347b5a5a0cae7ed5c0ee5675f86d9af"}, + {file = "websockets-11.0.3-cp37-cp37m-win32.whl", hash = "sha256:e590228200fcfc7e9109509e4d9125eace2042fd52b595dd22bbc34bb282307f"}, + {file = "websockets-11.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:b16fff62b45eccb9c7abb18e60e7e446998093cdcb50fed33134b9b6878836de"}, + {file = "websockets-11.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fb06eea71a00a7af0ae6aefbb932fb8a7df3cb390cc217d51a9ad7343de1b8d0"}, + {file = "websockets-11.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:8a34e13a62a59c871064dfd8ffb150867e54291e46d4a7cf11d02c94a5275bae"}, + {file = "websockets-11.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4841ed00f1026dfbced6fca7d963c4e7043aa832648671b5138008dc5a8f6d99"}, + {file = "websockets-11.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a073fc9ab1c8aff37c99f11f1641e16da517770e31a37265d2755282a5d28aa"}, + {file = "websockets-11.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68b977f21ce443d6d378dbd5ca38621755f2063d6fdb3335bda981d552cfff86"}, + {file = "websockets-11.0.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a99a7a71631f0efe727c10edfba09ea6bee4166a6f9c19aafb6c0b5917d09c"}, + {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bee9fcb41db2a23bed96c6b6ead6489702c12334ea20a297aa095ce6d31370d0"}, + {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4b253869ea05a5a073ebfdcb5cb3b0266a57c3764cf6fe114e4cd90f4bfa5f5e"}, + {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1553cb82942b2a74dd9b15a018dce645d4e68674de2ca31ff13ebc2d9f283788"}, + {file = "websockets-11.0.3-cp38-cp38-win32.whl", hash = "sha256:f61bdb1df43dc9c131791fbc2355535f9024b9a04398d3bd0684fc16ab07df74"}, + {file = "websockets-11.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:03aae4edc0b1c68498f41a6772d80ac7c1e33c06c6ffa2ac1c27a07653e79d6f"}, + {file = "websockets-11.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:777354ee16f02f643a4c7f2b3eff8027a33c9861edc691a2003531f5da4f6bc8"}, + {file = "websockets-11.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8c82f11964f010053e13daafdc7154ce7385ecc538989a354ccc7067fd7028fd"}, + {file = "websockets-11.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3580dd9c1ad0701169e4d6fc41e878ffe05e6bdcaf3c412f9d559389d0c9e016"}, + {file = "websockets-11.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f1a3f10f836fab6ca6efa97bb952300b20ae56b409414ca85bff2ad241d2a61"}, + {file = "websockets-11.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df41b9bc27c2c25b486bae7cf42fccdc52ff181c8c387bfd026624a491c2671b"}, + {file = "websockets-11.0.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:279e5de4671e79a9ac877427f4ac4ce93751b8823f276b681d04b2156713b9dd"}, + {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1fdf26fa8a6a592f8f9235285b8affa72748dc12e964a5518c6c5e8f916716f7"}, + {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69269f3a0b472e91125b503d3c0b3566bda26da0a3261c49f0027eb6075086d1"}, + {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:97b52894d948d2f6ea480171a27122d77af14ced35f62e5c892ca2fae9344311"}, + {file = "websockets-11.0.3-cp39-cp39-win32.whl", hash = "sha256:c7f3cb904cce8e1be667c7e6fef4516b98d1a6a0635a58a57528d577ac18a128"}, + {file = "websockets-11.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c792ea4eabc0159535608fc5658a74d1a81020eb35195dd63214dcf07556f67e"}, + {file = "websockets-11.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f2e58f2c36cc52d41f2659e4c0cbf7353e28c8c9e63e30d8c6d3494dc9fdedcf"}, + {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:de36fe9c02995c7e6ae6efe2e205816f5f00c22fd1fbf343d4d18c3d5ceac2f5"}, + {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ac56b661e60edd453585f4bd68eb6a29ae25b5184fd5ba51e97652580458998"}, + {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e052b8467dd07d4943936009f46ae5ce7b908ddcac3fda581656b1b19c083d9b"}, + {file = "websockets-11.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:42cc5452a54a8e46a032521d7365da775823e21bfba2895fb7b77633cce031bb"}, + {file = "websockets-11.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e6316827e3e79b7b8e7d8e3b08f4e331af91a48e794d5d8b099928b6f0b85f20"}, + {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8531fdcad636d82c517b26a448dcfe62f720e1922b33c81ce695d0edb91eb931"}, + {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c114e8da9b475739dde229fd3bc6b05a6537a88a578358bc8eb29b4030fac9c9"}, + {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e063b1865974611313a3849d43f2c3f5368093691349cf3c7c8f8f75ad7cb280"}, + {file = "websockets-11.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:92b2065d642bf8c0a82d59e59053dd2fdde64d4ed44efe4870fa816c1232647b"}, + {file = "websockets-11.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0ee68fe502f9031f19d495dae2c268830df2760c0524cbac5d759921ba8c8e82"}, + {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcacf2c7a6c3a84e720d1bb2b543c675bf6c40e460300b628bab1b1efc7c034c"}, + {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b67c6f5e5a401fc56394f191f00f9b3811fe843ee93f4a70df3c389d1adf857d"}, + {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d5023a4b6a5b183dc838808087033ec5df77580485fc533e7dab2567851b0a4"}, + {file = "websockets-11.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ed058398f55163a79bb9f06a90ef9ccc063b204bb346c4de78efc5d15abfe602"}, + {file = "websockets-11.0.3-py3-none-any.whl", hash = "sha256:6681ba9e7f8f3b19440921e99efbb40fc89f26cd71bf539e45d8c8a25c976dc6"}, + {file = "websockets-11.0.3.tar.gz", hash = "sha256:88fc51d9a26b10fc331be344f1781224a375b78488fc343620184e95a4b27016"}, +] + [[package]] name = "win32-setctime" version = "1.1.0" @@ -4987,7 +6186,25 @@ files = [ idna = ">=2.0" multidict = ">=4.0" +[[package]] +name = "zipp" +version = "3.17.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, + {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", 
"pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + +[extras] +benchmark = ["agbenchmark"] + [metadata] lock-version = "2.0" python-versions = "^3.10" -content-hash = "2db7216c26d6a87713b4aabc2652135252f35ba88bd2e7d1cf795b583e3e1c9e" +content-hash = "f6045904ba92bfb92c55fcb8b4e9294899ead53d49aa4e6ae88a0ca2ae0ec454" diff --git a/autogpts/autogpt/pyproject.toml b/autogpts/autogpt/pyproject.toml index 0f7c06e3..9871db54 100644 --- a/autogpts/autogpt/pyproject.toml +++ b/autogpts/autogpt/pyproject.toml @@ -16,11 +16,15 @@ packages = [{ include = "autogpt" }] [tool.poetry.scripts] -autogpt = "autogpt.app.cli:main" +autogpt = "autogpt.app.cli:run" +run = "autogpt.app.cli:run" +serve = "autogpt.app.cli:serve" [tool.poetry.dependencies] python = "^3.10" +# autogpt-forge = { path = "../forge" } +autogpt-forge = {git = "https://github.com/Significant-Gravitas/AutoGPT.git", subdirectory = "autogpts/forge", rev = "10aecec"} beautifulsoup4 = "^4.12.2" charset-normalizer = "^3.1.0" click = "*" @@ -29,9 +33,11 @@ distro = "^1.8.0" docker = "*" duckduckgo-search = "^3.0.2" en-core-web-sm = {url = "https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl"} +fastapi = "*" ftfy = "^6.1.1" google-api-python-client = "*" gTTS = "^2.3.1" +hypercorn = "^0.14.4" inflection = "*" jsonschema = "*" markdown = "*" @@ -56,17 +62,18 @@ spacy = "^3.0.0" tiktoken = "^0.5.0" webdriver-manager = "*" -# web server -fastapi = "*" -uvicorn = "*" - # OpenAI and Generic plugins import openapi-python-client = "^0.14.0" +# Benchmarking +agbenchmark = { path = "../../benchmark", optional = true } + +[tool.poetry.extras] +benchmark = ["agbenchmark"] + [tool.poetry.group.dev.dependencies] auto-gpt-plugin-template = {git = "https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template", rev = "0.1.0"} black = "*" -coverage = "*" flake8 = "*" gitpython = "^3.1.32" isort = "*" @@ -77,8 +84,9 @@ types-colorama = "*" types-Markdown = "*" types-Pillow = "*" -# Testing dependencies +# Testing asynctest = "*" +coverage = "*" pytest = "*" pytest-asyncio = "*" pytest-benchmark = "*" @@ -89,9 +97,6 @@ pytest-recording = "*" pytest-xdist = "*" vcrpy = {git = "https://github.com/Significant-Gravitas/vcrpy.git", rev = "master"} -[tool.poetry.group.benchmark.dependencies] -agbenchmark = { path = "../../benchmark" } - [build-system] requires = ["poetry-core"] diff --git a/autogpts/autogpt/run b/autogpts/autogpt/run new file mode 100755 index 00000000..b37c053f --- /dev/null +++ b/autogpts/autogpt/run @@ -0,0 +1,10 @@ +#!/bin/sh + +kill $(lsof -t -i :8000) + +if [ ! -f .env ]; then + cp .env.example .env + echo "Please add your api keys to the .env file." >&2 + # exit 1 +fi +poetry run serve --debug diff --git a/autogpts/autogpt/run_benchmark b/autogpts/autogpt/run_benchmark new file mode 100755 index 00000000..72640794 --- /dev/null +++ b/autogpts/autogpt/run_benchmark @@ -0,0 +1,9 @@ +#!/bin/sh + +# Kill processes using port 8080 if any. +if lsof -t -i :8080; then + kill $(lsof -t -i :8080) +fi +# This is the cli entry point for the benchmarking tool. +# To run this in server mode pass in `serve` as the first argument. 
+poetry run agbenchmark "$@" diff --git a/autogpts/autogpt/setup b/autogpts/autogpt/setup new file mode 100755 index 00000000..511ef727 --- /dev/null +++ b/autogpts/autogpt/setup @@ -0,0 +1,4 @@ +#!/bin/sh + +poetry install --no-interaction --extras benchmark +echo "Setup completed successfully." diff --git a/autogpts/autogpt/tests/challenges/__init__.py b/autogpts/autogpt/tests/challenges/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/autogpts/autogpt/tests/challenges/basic_abilities/__init__.py b/autogpts/autogpt/tests/challenges/basic_abilities/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/autogpts/autogpt/tests/challenges/basic_abilities/goal_oriented_tasks.md b/autogpts/autogpt/tests/challenges/basic_abilities/goal_oriented_tasks.md deleted file mode 100644 index b06b371b..00000000 --- a/autogpts/autogpt/tests/challenges/basic_abilities/goal_oriented_tasks.md +++ /dev/null @@ -1,10 +0,0 @@ -If the goal oriented task pipeline fails, it means: -- you somehow changed the way the system prompt is generated -- or you broke autogpt. - -To know which one, you can run the following command: -```bash -pytest -s -k tests/integration/goal_oriented - -If the test is successful, it will record new cassettes in VCR. Then you can just push these to your branch and the pipeline -will pass diff --git a/autogpts/autogpt/tests/challenges/basic_abilities/test_browse_website.py b/autogpts/autogpt/tests/challenges/basic_abilities/test_browse_website.py deleted file mode 100644 index fafa9ad6..00000000 --- a/autogpts/autogpt/tests/challenges/basic_abilities/test_browse_website.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest - -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import run_challenge - -CYCLE_COUNT = 2 -USER_INPUTS = [ - "Use the browse_website command to visit http://books.toscrape.com/catalogue/meditations_33/index.html and answer the question 'What is the price of the book?'\nWrite the price of the book to a file named 'browse_website.txt'.'\nUse the task_complete command to complete the task.\nDo not use any other commands."
-] - - -@challenge() -def test_browse_website( - patched_api_requestor: None, - monkeypatch: pytest.MonkeyPatch, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - run_challenge( - challenge_name, - level_to_run, - monkeypatch, - USER_INPUTS[level_to_run - 1], - CYCLE_COUNT, - ) - - file_path = workspace.get_path("browse_website.txt") - - with open(file_path, "r") as file: - content = file.read() - assert "£25.89" in content, f"Expected £25.89, got {content}" diff --git a/autogpts/autogpt/tests/challenges/basic_abilities/test_write_file.py b/autogpts/autogpt/tests/challenges/basic_abilities/test_write_file.py deleted file mode 100644 index 2a202ee3..00000000 --- a/autogpts/autogpt/tests/challenges/basic_abilities/test_write_file.py +++ /dev/null @@ -1,47 +0,0 @@ -import pytest - -from autogpt.config import Config -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import get_workspace_path, run_challenge - -CYCLE_COUNT_PER_LEVEL = [1, 1] -EXPECTED_OUTPUTS_PER_LEVEL = [ - {"hello_world.txt": ["Hello World"]}, - {"hello_world_1.txt": ["Hello World"], "hello_world_2.txt": ["Hello World"]}, -] -USER_INPUTS = [ - "Write 'Hello World' into a file named \"hello_world.txt\".", - 'Write \'Hello World\' into 2 files named "hello_world_1.txt"and "hello_world_2.txt".', -] - - -@challenge() -def test_write_file( - config: Config, - patched_api_requestor: None, - monkeypatch: pytest.MonkeyPatch, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - run_challenge( - challenge_name, - level_to_run, - monkeypatch, - USER_INPUTS[level_to_run - 1], - CYCLE_COUNT_PER_LEVEL[level_to_run - 1], - ) - - expected_outputs = EXPECTED_OUTPUTS_PER_LEVEL[level_to_run - 1] - - for file_name, expected_lines in expected_outputs.items(): - file_path = get_workspace_path(workspace, file_name) - with open(file_path, "r") as file: - content = file.read() - - for expected_line in expected_lines: - assert ( - expected_line in content - ), f"Expected '{expected_line}' in file {file_name}, but it was not found" diff --git a/autogpts/autogpt/tests/challenges/challenge_decorator/__init__.py b/autogpts/autogpt/tests/challenges/challenge_decorator/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/autogpts/autogpt/tests/challenges/challenge_decorator/challenge.py b/autogpts/autogpt/tests/challenges/challenge_decorator/challenge.py deleted file mode 100644 index e875ac99..00000000 --- a/autogpts/autogpt/tests/challenges/challenge_decorator/challenge.py +++ /dev/null @@ -1,24 +0,0 @@ -from typing import Optional - - -class Challenge: - BEAT_CHALLENGES = False - DEFAULT_CHALLENGE_NAME = "default_challenge_name" - - def __init__( - self, - name: str, - category: str, - max_level: int, - is_new_challenge: bool, - max_level_beaten: Optional[int] = None, - level_to_run: Optional[int] = None, - ) -> None: - self.name = name - self.category = category - self.max_level_beaten = max_level_beaten - self.max_level = max_level - self.succeeded = False - self.skipped = False - self.level_to_run = level_to_run - self.is_new_challenge = is_new_challenge diff --git a/autogpts/autogpt/tests/challenges/challenge_decorator/challenge_decorator.py b/autogpts/autogpt/tests/challenges/challenge_decorator/challenge_decorator.py deleted file mode 100644 index 3d72ff9b..00000000 --- 
a/autogpts/autogpt/tests/challenges/challenge_decorator/challenge_decorator.py +++ /dev/null @@ -1,84 +0,0 @@ -import os -from functools import wraps -from typing import Any, Callable, Optional - -import pytest - -from tests.challenges.challenge_decorator.challenge import Challenge -from tests.challenges.challenge_decorator.challenge_utils import create_challenge -from tests.challenges.challenge_decorator.score_utils import ( - get_scores, - update_new_score, -) - -MAX_LEVEL_TO_IMPROVE_ON = ( - 1 # we will attempt to beat 1 level above the current level for now. -) - -CHALLENGE_FAILED_MESSAGE = "Challenges can sometimes fail randomly, please run this test again and if it fails reach out to us on https://discord.gg/autogpt in the 'challenges' channel to let us know the challenge you're struggling with." - - -def challenge() -> Callable[[Callable[..., Any]], Callable[..., None]]: - def decorator(func: Callable[..., Any]) -> Callable[..., None]: - @pytest.mark.requires_openai_api_key - @pytest.mark.vcr - @wraps(func) - def wrapper(*args: Any, **kwargs: Any) -> None: - run_remaining = MAX_LEVEL_TO_IMPROVE_ON if Challenge.BEAT_CHALLENGES else 1 - original_error: Optional[Exception] = None - - while run_remaining > 0: - current_score, new_score, new_score_location = get_scores() - level_to_run = ( - kwargs["level_to_run"] if "level_to_run" in kwargs else None - ) - challenge = create_challenge( - func, current_score, Challenge.BEAT_CHALLENGES, level_to_run - ) - if challenge.level_to_run is not None: - kwargs["level_to_run"] = challenge.level_to_run - kwargs["challenge_name"] = challenge.name - try: - func(*args, **kwargs) - challenge.succeeded = True - except AssertionError as err: - original_error = AssertionError( - f"{CHALLENGE_FAILED_MESSAGE}\n{err}" - ) - challenge.succeeded = False - except Exception as err: - original_error = err - challenge.succeeded = False - else: - challenge.skipped = True - if os.environ.get("CI") == "true": - new_max_level_beaten = get_new_max_level_beaten( - challenge, Challenge.BEAT_CHALLENGES - ) - update_new_score( - new_score_location, new_score, challenge, new_max_level_beaten - ) - if challenge.level_to_run is None: - pytest.skip("This test has not been unlocked yet.") - - if not challenge.succeeded: - if Challenge.BEAT_CHALLENGES or challenge.is_new_challenge: - pytest.xfail(str(original_error)) - if original_error: - raise original_error - run_remaining -= 1 - - return wrapper - - return decorator - - -def get_new_max_level_beaten( - challenge: Challenge, beat_challenges: bool -) -> Optional[int]: - if challenge.succeeded: - return challenge.level_to_run - if challenge.skipped: - return challenge.max_level_beaten - # Challenge failed - return challenge.max_level_beaten if beat_challenges else None diff --git a/autogpts/autogpt/tests/challenges/challenge_decorator/challenge_utils.py b/autogpts/autogpt/tests/challenges/challenge_decorator/challenge_utils.py deleted file mode 100644 index 74f4cf56..00000000 --- a/autogpts/autogpt/tests/challenges/challenge_decorator/challenge_utils.py +++ /dev/null @@ -1,85 +0,0 @@ -import os -from typing import Any, Callable, Dict, Optional, Tuple - -from tests.challenges.challenge_decorator.challenge import Challenge - -CHALLENGE_PREFIX = "test_" - - -def create_challenge( - func: Callable[..., Any], - current_score: Dict[str, Any], - is_beat_challenges: bool, - level_to_run: Optional[int] = None, -) -> Challenge: - challenge_category, challenge_name = get_challenge_identifiers(func) - is_new_challenge = challenge_name not 
in current_score.get(challenge_category, {}) - max_level = get_max_level(current_score, challenge_category, challenge_name) - max_level_beaten = get_max_level_beaten( - current_score, challenge_category, challenge_name - ) - level_to_run = get_level_to_run( - is_beat_challenges, level_to_run, max_level, max_level_beaten, is_new_challenge - ) - - return Challenge( - name=challenge_name, - category=challenge_category, - max_level=max_level, - max_level_beaten=max_level_beaten, - level_to_run=level_to_run, - is_new_challenge=is_new_challenge, - ) - - -def get_level_to_run( - is_beat_challenges: bool, - level_to_run: Optional[int], - max_level: int, - max_level_beaten: Optional[int], - is_new_challenge: bool, -) -> Optional[int]: - if is_new_challenge: - return 1 - if level_to_run is not None: - if level_to_run > max_level: - raise ValueError( - f"Level to run ({level_to_run}) is greater than max level ({max_level})" - ) - return level_to_run - if is_beat_challenges: - if max_level_beaten == max_level: - return None - return 1 if max_level_beaten is None else max_level_beaten + 1 - return max_level_beaten - - -def get_challenge_identifiers(func: Callable[..., Any]) -> Tuple[str, str]: - full_path = os.path.dirname(os.path.abspath(func.__code__.co_filename)) - challenge_category = os.path.basename(full_path) - challenge_name = func.__name__.replace(CHALLENGE_PREFIX, "") - return challenge_category, challenge_name - - -def get_max_level( - current_score: Dict[str, Any], - challenge_category: str, - challenge_name: str, -) -> int: - return ( - current_score.get(challenge_category, {}) - .get(challenge_name, {}) - .get("max_level", 1) - ) - - -def get_max_level_beaten( - current_score: Dict[str, Any], - challenge_category: str, - challenge_name: str, -) -> Optional[int]: - return ( - current_score.get(challenge_category, {}) - .get(challenge_name, {}) - .get("max_level_beaten", None) - ) diff --git a/autogpts/autogpt/tests/challenges/challenge_decorator/score_utils.py b/autogpts/autogpt/tests/challenges/challenge_decorator/score_utils.py deleted file mode 100644 index 1a8be744..00000000 --- a/autogpts/autogpt/tests/challenges/challenge_decorator/score_utils.py +++ /dev/null @@ -1,59 +0,0 @@ -import json -import os -from typing import Any, Dict, Optional, Tuple - -from tests.challenges.challenge_decorator.challenge import Challenge - -CURRENT_SCORE_LOCATION = "../current_score" -NEW_SCORE_LOCATION = "../new_score" - - -def update_new_score( - filename_new_score: str, - new_score: Dict[str, Any], - challenge: Challenge, - new_max_level_beaten: Optional[int], -) -> None: - write_new_score(new_score, challenge, new_max_level_beaten) - write_new_score_to_file(new_score, filename_new_score) - - -def write_new_score( - new_score: Dict[str, Any], challenge: Challenge, new_max_level_beaten: Optional[int] -) -> Dict[str, Any]: - new_score.setdefault(challenge.category, {}) - new_score[challenge.category][challenge.name] = { - "max_level_beaten": new_max_level_beaten, - "max_level": challenge.max_level, - } - return new_score - - -def write_new_score_to_file(new_score: Dict[str, Any], filename: str) -> None: - with open(filename, "w") as file: - json.dump(new_score, file, indent=4) - - -def get_scores() -> Tuple[Dict[str, Any], Dict[str, Any], str]: - filename_current_score, filename_new_score = get_score_locations() - current_score = load_json(filename_current_score) - new_score = load_json(filename_new_score) - return current_score, new_score, filename_new_score - - -def load_json(filename: str) -> 
Dict[str, Any]: - if os.path.isfile(filename): - with open(filename, "r") as file: - return json.load(file) - else: - return {} - - -def get_score_locations() -> Tuple[str, str]: - pid = os.getpid() - project_root = os.path.dirname(os.path.abspath(__file__)) - filename_current_score = os.path.join( - project_root, f"{CURRENT_SCORE_LOCATION}.json" - ) - filename_new_score = os.path.join(project_root, f"{NEW_SCORE_LOCATION}_{pid}.json") - return filename_current_score, filename_new_score diff --git a/autogpts/autogpt/tests/challenges/conftest.py b/autogpts/autogpt/tests/challenges/conftest.py deleted file mode 100644 index 784dbf71..00000000 --- a/autogpts/autogpt/tests/challenges/conftest.py +++ /dev/null @@ -1,77 +0,0 @@ -from typing import Any, Dict, Generator, Optional - -import pytest -from _pytest.config import Config -from _pytest.config.argparsing import Parser -from _pytest.fixtures import FixtureRequest -from pytest_mock import MockerFixture - -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge import Challenge -from tests.vcr import before_record_response - - -def before_record_response_filter_errors( - response: Dict[str, Any] -) -> Optional[Dict[str, Any]]: - """In challenges we don't want to record errors (See issue #4461)""" - if response["status"]["code"] >= 400: - return None - - return before_record_response(response) - - -@pytest.fixture(scope="module") -def vcr_config(get_base_vcr_config: Dict[str, Any]) -> Dict[str, Any]: - # this fixture is called by the pytest-recording vcr decorator. - return get_base_vcr_config | { - "before_record_response": before_record_response_filter_errors, - } - - -def pytest_addoption(parser: Parser) -> None: - parser.addoption( - "--level", action="store", default=None, type=int, help="Specify test level" - ) - parser.addoption( - "--beat-challenges", - action="store_true", - help="Specifies whether the test suite should attempt to beat challenges", - ) - - -def pytest_configure(config: Config) -> None: - level = config.getoption("--level", default=None) - config.option.level = level - beat_challenges = config.getoption("--beat-challenges", default=False) - config.option.beat_challenges = beat_challenges - - -@pytest.fixture -def level_to_run(request: FixtureRequest) -> int: - ## used for challenges in the goal oriented tests - return request.config.option.level - - -@pytest.fixture -def challenge_name() -> str: - return Challenge.DEFAULT_CHALLENGE_NAME - - -@pytest.fixture(autouse=True) -def check_beat_challenges(request: FixtureRequest) -> None: - Challenge.BEAT_CHALLENGES = request.config.getoption("--beat-challenges") - - -@pytest.fixture -def patched_make_workspace(mocker: MockerFixture, workspace: Workspace) -> Generator: - def patched_make_workspace(*args: Any, **kwargs: Any) -> str: - return workspace.root - - mocker.patch.object( - Workspace, - "make_workspace", - new=patched_make_workspace, - ) - - yield diff --git a/autogpts/autogpt/tests/challenges/current_score.json b/autogpts/autogpt/tests/challenges/current_score.json deleted file mode 100644 index 524f4fd9..00000000 --- a/autogpts/autogpt/tests/challenges/current_score.json +++ /dev/null @@ -1,56 +0,0 @@ -{ - "basic_abilities": { - "browse_website": { - "max_level": 1, - "max_level_beaten": null - }, - "write_file": { - "max_level": 2, - "max_level_beaten": 1 - } - }, - "debug_code": { - "debug_code_challenge_a": { - "max_level": 2, - "max_level_beaten": 1 - } - }, - "information_retrieval": { - "information_retrieval_challenge_a": {
- "max_level": 3, - "max_level_beaten": null - }, - "information_retrieval_challenge_b": { - "max_level": 1, - "max_level_beaten": null - }, - "information_retrieval_challenge_c": { - "max_level": 3, - "max_level_beaten": null - } - }, - "kubernetes": { - "kubernetes_template_challenge_a": { - "max_level": 1, - "max_level_beaten": null - } - }, - "memory": { - "memory_challenge_a": { - "max_level": 3, - "max_level_beaten": 3 - }, - "memory_challenge_b": { - "max_level": 5, - "max_level_beaten": null - }, - "memory_challenge_c": { - "max_level": 5, - "max_level_beaten": null - }, - "memory_challenge_d": { - "max_level": 5, - "max_level_beaten": null - } - } -} diff --git a/autogpts/autogpt/tests/challenges/debug_code/data/code.py b/autogpts/autogpt/tests/challenges/debug_code/data/code.py deleted file mode 100644 index df8120bf..00000000 --- a/autogpts/autogpt/tests/challenges/debug_code/data/code.py +++ /dev/null @@ -1,13 +0,0 @@ -# mypy: ignore-errors -from typing import List, Optional - - -def two_sum(nums: List, target: int) -> Optional[List[int]]: - seen = {} - for i, num in enumerate(nums): - typo - complement = target - num - if complement in seen: - return [seen[complement], i] - seen[num] = i - return None diff --git a/autogpts/autogpt/tests/challenges/debug_code/data/test.py b/autogpts/autogpt/tests/challenges/debug_code/data/test.py deleted file mode 100644 index d85d1353..00000000 --- a/autogpts/autogpt/tests/challenges/debug_code/data/test.py +++ /dev/null @@ -1,31 +0,0 @@ -# mypy: ignore-errors -from code import two_sum -from typing import List - - -def test_two_sum(nums: List, target: int, expected_result: List[int]) -> None: - result = two_sum(nums, target) - print(result) - assert ( - result == expected_result - ), f"AssertionError: Expected the output to be {expected_result}" - - -if __name__ == "__main__": - # test the trivial case with the first two numbers - nums = [2, 7, 11, 15] - target = 9 - expected_result = [0, 1] - test_two_sum(nums, target, expected_result) - - # test for ability to use zero and the same number twice - nums = [2, 7, 0, 15, 12, 0] - target = 0 - expected_result = [2, 5] - test_two_sum(nums, target, expected_result) - - # test for first and last index usage and negative numbers - nums = [-6, 7, 11, 4] - target = -2 - expected_result = [0, 3] - test_two_sum(nums, target, expected_result) diff --git a/autogpts/autogpt/tests/challenges/debug_code/test_debug_code_challenge_a.py b/autogpts/autogpt/tests/challenges/debug_code/test_debug_code_challenge_a.py deleted file mode 100644 index 9bd49271..00000000 --- a/autogpts/autogpt/tests/challenges/debug_code/test_debug_code_challenge_a.py +++ /dev/null @@ -1,68 +0,0 @@ -from pathlib import Path - -import pytest -from pytest_mock import MockerFixture - -from autogpt.agents import Agent -from autogpt.commands.execute_code import execute_python_file -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import ( - copy_file_into_workspace, - get_workspace_path, - run_challenge, -) - -CYCLE_COUNT = 5 -EXPECTED_VALUES = ["[0, 1]", "[2, 5]", "[0, 3]"] -DIRECTORY_PATH = Path(__file__).parent / "data" -CODE_FILE_PATH = "code.py" -TEST_FILE_PATH = "test.py" -USER_INPUTS = [ - "1- Run test.py using the execute_python_file command.\n2- Read code.py using the read_file command.\n3- Modify code.py using the write_to_file command.\nRepeat step 1, 2 and 3 until test.py runs without errors. 
Do not modify the test.py file.", - "1- Run test.py.\n2- Read code.py.\n3- Modify code.py.\nRepeat step 1, 2 and 3 until test.py runs without errors.\n", - "Make test.py run without errors.", -] - - -@challenge() -def test_debug_code_challenge_a( - dummy_agent: Agent, - monkeypatch: pytest.MonkeyPatch, - patched_api_requestor: MockerFixture, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - Test whether the agent can debug a simple code snippet. - - :param debug_code_agent: The agent to test. - :param monkeypatch: pytest's monkeypatch utility for modifying builtins. - :patched_api_requestor: Sends api requests to our API CI pipeline - :level_to_run: The level to run. - """ - - copy_file_into_workspace(workspace, DIRECTORY_PATH, CODE_FILE_PATH) - copy_file_into_workspace(workspace, DIRECTORY_PATH, TEST_FILE_PATH) - - run_challenge( - challenge_name, - level_to_run, - monkeypatch, - USER_INPUTS[level_to_run - 1], - CYCLE_COUNT, - ) - - output = execute_python_file( - get_workspace_path(workspace, TEST_FILE_PATH), - agent=dummy_agent, - ) - - assert "error" not in output.lower(), f"Errors found in output: {output}!" - - for expected_value in EXPECTED_VALUES: - assert ( - expected_value in output - ), f"Expected output to contain {expected_value}, but it was not found in {output}!" diff --git a/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_a.py b/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_a.py deleted file mode 100644 index e117dba9..00000000 --- a/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_a.py +++ /dev/null @@ -1,50 +0,0 @@ -import pytest -from pytest_mock import MockerFixture - -from autogpt.workspace import Workspace -from tests.challenges.basic_abilities.test_browse_website import USER_INPUTS -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import get_workspace_path, run_challenge - -CYCLE_COUNT = 3 -EXPECTED_REVENUES = [["81"], ["81"], ["81", "53", "24", "21", "11", "7", "4", "3", "2"]] - -OUTPUT_LOCATION = "output.txt" -USER_INPUTS = [ - "Write to a file called output.txt containing tesla's revenue in 2022 after searching for 'tesla revenue 2022'.", - "Write to a file called output.txt containing tesla's revenue in 2022.", - "Write to a file called output.txt containing tesla's revenue every year since its creation.", -] - - -@challenge() -def test_information_retrieval_challenge_a( - monkeypatch: pytest.MonkeyPatch, - patched_api_requestor: MockerFixture, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - Test the challenge_a function in a given agent by mocking user inputs and checking the output file content. - - :param get_company_revenue_agent: The agent to test. - :param monkeypatch: pytest's monkeypatch utility for modifying builtins. - """ - run_challenge( - challenge_name, - level_to_run, - monkeypatch, - USER_INPUTS[level_to_run - 1], - CYCLE_COUNT, - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - with open(file_path, "r") as file: - content = file.read() - expected_revenues = EXPECTED_REVENUES[level_to_run - 1] - for revenue in expected_revenues: - assert ( - f"{revenue}." 
in content or f"{revenue}," in content - ), f"Expected the file to contain {revenue}" diff --git a/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_b.py b/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_b.py deleted file mode 100644 index 010afd87..00000000 --- a/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_b.py +++ /dev/null @@ -1,52 +0,0 @@ -import pytest -from pytest_mock import MockerFixture - -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import get_workspace_path, run_challenge - -CYCLE_COUNT = 3 -OUTPUT_LOCATION = "2010_nobel_prize_winners.txt" -USER_INPUTS = [ - "Write to file the winner's name(s), affiliated university, and discovery of the 2010 nobel prize in physics. Write your final answer to 2010_nobel_prize_winners.txt." -] - - -@challenge() -def test_information_retrieval_challenge_b( - monkeypatch: pytest.MonkeyPatch, - patched_api_requestor: MockerFixture, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - Test the challenge_b function in a given agent by mocking user inputs and checking the output file content. - - :param get_nobel_prize_agent: The agent to test. - :param monkeypatch: pytest's monkeypatch utility for modifying builtins. - :param patched_api_requestor: APIRequestor Patch to override the openai.api_requestor module for testing. - :param level_to_run: The level to run. - """ - - run_challenge( - challenge_name, - level_to_run, - monkeypatch, - USER_INPUTS[level_to_run - 1], - CYCLE_COUNT, - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - - with open(file_path, "r") as file: - content = file.read() - assert "Andre Geim" in content, "Expected the file to contain Andre Geim" - assert ( - "Konstantin Novoselov" in content - ), "Expected the file to contain Konstantin Novoselov" - assert ( - "University of Manchester" in content - ), "Expected the file to contain University of Manchester" - assert "graphene" in content, "Expected the file to contain graphene" diff --git a/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py b/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py deleted file mode 100644 index e827967d..00000000 --- a/autogpts/autogpt/tests/challenges/information_retrieval/test_information_retrieval_challenge_c.py +++ /dev/null @@ -1,47 +0,0 @@ -import pytest -from pytest_mock import MockerFixture - -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import get_workspace_path, run_challenge - -CYCLE_COUNT = 3 -COO = [["Luke Lafreniere"], ["Luke Lafreniere"], ["Luke Lafreniere 2017"]] - -OUTPUT_LOCATION = "output.txt" -USER_INPUTS = [ - "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of Floatplane Media.", - "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of https://www.floatplane.com.", - "Write to a file called output.txt containing the name and title of the current Chief Operating Officer of https://www.floatplane.com and the year it was formed.", -] - - -@challenge() -def test_information_retrieval_challenge_c( - 
monkeypatch: pytest.MonkeyPatch, - patched_api_requestor: MockerFixture, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - Test the challenge_c function in a given agent by mocking user inputs and checking the output file content. - - :param get_floatplane_ceo_agent: The agent to test. - :param monkeypatch: pytest's monkeypatch utility for modifying builtins. - """ - run_challenge( - challenge_name, - level_to_run, - monkeypatch, - USER_INPUTS[level_to_run - 1], - CYCLE_COUNT, - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - with open(file_path, "r") as file: - content = file.read() - coo_name = COO[level_to_run - 1] - for chief in coo_name: - assert chief in content, f"Expected the file to contain {chief}" diff --git a/autogpts/autogpt/tests/challenges/kubernetes/test_kubernetes_template_challenge_a.py b/autogpts/autogpt/tests/challenges/kubernetes/test_kubernetes_template_challenge_a.py deleted file mode 100644 index cd923e67..00000000 --- a/autogpts/autogpt/tests/challenges/kubernetes/test_kubernetes_template_challenge_a.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Any, Dict - -import pytest -import yaml -from pytest_mock import MockerFixture - -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import get_workspace_path, run_challenge - -CYCLE_COUNT = 3 -OUTPUT_LOCATION = "kube.yaml" -USER_INPUTS = ["Write a simple kubernetes deployment file and save it as a kube.yaml."] - - -@challenge() -def test_kubernetes_template_challenge_a( - monkeypatch: pytest.MonkeyPatch, - patched_api_requestor: MockerFixture, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - Test the challenge_a function in a given agent by mocking user inputs - and checking the output file content. 
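- The resulting kube.yaml must parse as YAML and contain apiVersion, kind, metadata and spec; its kind is then checked against Service, Deployment and Pod.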
- - Args: - kubernetes_agent (Agent) - monkeypatch (pytest.MonkeyPatch) - level_to_run (int) - """ - run_challenge( - challenge_name, - level_to_run, - monkeypatch, - USER_INPUTS[level_to_run - 1], - CYCLE_COUNT, - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - with open(file_path, "r") as file: - content_string = file.read() - - for word in ["apiVersion", "kind", "metadata", "spec"]: - assert word in content_string, f"Expected the file to contain {word}" - - yaml_as_dict: Dict[str, Any] = yaml.safe_load(content_string) - for word in ["Service", "Deployment", "Pod"]: - assert word in yaml_as_dict.get( - "kind", "" - ), f"Expected the file to contain {word}" diff --git a/autogpts/autogpt/tests/challenges/memory/__init__.py b/autogpts/autogpt/tests/challenges/memory/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_a.py b/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_a.py deleted file mode 100644 index bbd221f4..00000000 --- a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_a.py +++ /dev/null @@ -1,86 +0,0 @@ -import pytest -from pytest_mock import MockerFixture - -from autogpt.config import Config -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import get_workspace_path, run_challenge - -OUTPUT_LOCATION = "output.txt" - -USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file" - - -@challenge() -def test_memory_challenge_a( - config: Config, - patched_api_requestor: MockerFixture, - monkeypatch: pytest.MonkeyPatch, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - The agent reads a file containing a task_id. Then, it reads a series of other files. - After reading 'n' files, the agent must write the task_id into a new file. - Args: - workspace (Workspace) - patched_api_requestor (MockerFixture) - monkeypatch (pytest.MonkeyPatch) - level_to_run (int) - """ - task_id = "2314" - create_instructions_files(workspace, level_to_run, task_id) - - run_challenge( - challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2 - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - with open(file_path, "r") as file: - content = file.read() - assert task_id in content, f"Expected the file to contain {task_id}" - - -def create_instructions_files( - workspace: Workspace, - num_files: int, - task_id: str, - base_filename: str = "instructions_", -) -> None: - """ - Creates a series of instructions files for the memory challenge. 
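- For example, num_files=3 with task_id "2314" yields instructions_1.txt ("This task_id is 2314\nRead the file instructions_2.txt"), instructions_2.txt ("Read the file instructions_3.txt") and instructions_3.txt ("Write the task_id into the file output.txt\nShutdown").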
- Args: - workspace (Workspace) - num_files (int) - task_id (str) - base_filename (str, optional) - """ - for i in range(1, num_files + 1): - content = generate_content(i, task_id, base_filename, num_files) - file_name = f"{base_filename}{i}.txt" - file_path = get_workspace_path(workspace, file_name) - with open(file_path, "w") as file: - file.write(content) - - -def generate_content( - index: int, task_id: str, base_filename: str, num_files: int -) -> str: - """ - Args: - index: int - task_id: str - base_filename: str - num_files: int - - Returns: str - """ - if index == 1: - return ( - f"This task_id is {task_id}\nRead the file {base_filename}{index + 1}.txt" - ) - if index != num_files: - return f"Read the file {base_filename}{index + 1}.txt" - return "Write the task_id into the file output.txt\nShutdown" diff --git a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_b.py b/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_b.py deleted file mode 100644 index 30d9b161..00000000 --- a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_b.py +++ /dev/null @@ -1,86 +0,0 @@ -import pytest -from pytest_mock import MockerFixture - -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import generate_noise, get_workspace_path, run_challenge - -NOISE = 1000 -OUTPUT_LOCATION = "output.txt" -USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file" - - -@challenge() -def test_memory_challenge_b( - patched_api_requestor: MockerFixture, - monkeypatch: pytest.MonkeyPatch, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - The agent reads a series of files, each containing a task_id and noise. After reading 'n' files, - the agent must write all the task_ids into a new file, filtering out the noise. - - Args: - workspace (Workspace) - patched_api_requestor (MockerFixture) - monkeypatch (pytest.MonkeyPatch) - level_to_run (int) - """ - task_ids = [str(i * 1111) for i in range(1, level_to_run + 1)] - create_instructions_files(workspace, level_to_run, task_ids) - - run_challenge( - challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2 - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - with open(file_path, "r") as file: - content = file.read() - for task_id in task_ids: - assert task_id in content, f"Expected the file to contain {task_id}" - - -def create_instructions_files( - workspace: Workspace, - level: int, - task_ids: list, - base_filename: str = "instructions_", -) -> None: - """ - Creates a series of instructions files for the memory challenge. 
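- For example, level=2 with task_ids ["1111", "2222"] yields instructions_1.txt (noise around "The current task_id is 1111." plus a pointer to instructions_2.txt) and instructions_2.txt (noise around "This task_id is 2222" plus the instruction to write all task_ids into output.txt).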
- Args: - level: - workspace (Workspace) - num_files (int) - task_ids (list) - base_filename (str, optional) - """ - for i in range(1, level + 1): - content = generate_content(i, task_ids, base_filename, level) - file_name = f"{base_filename}{i}.txt" - file_path = get_workspace_path(workspace, file_name) - - with open(file_path, "w") as file: - file.write(content) - - -def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str: - """ - Args: - index: int - task_ids: list - base_filename: str - num_files: int - - Returns: str - """ - task_id = task_ids[index - 1] - noise = generate_noise(NOISE) - if index != level: - if level == 1: - return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command." - return f"{noise}\nThe current task_id is {task_id}.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command." - return f"{noise}\nThis task_id is {task_id}\n{noise}\nWrite all the task_ids into the file output.txt. The file has not been created yet. After that, use the task_complete command.\n" diff --git a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_c.py b/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_c.py deleted file mode 100644 index db58cd4b..00000000 --- a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_c.py +++ /dev/null @@ -1,105 +0,0 @@ -import pytest -from pytest_mock import MockerFixture - -from autogpt.commands.file_operations import read_file -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import generate_noise, get_workspace_path, run_challenge - -NOISE = 1200 -OUTPUT_LOCATION = "output.txt" -USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file" - - -@challenge() -def test_memory_challenge_c( - patched_api_requestor: MockerFixture, - monkeypatch: pytest.MonkeyPatch, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - Instead of reading task Ids from files as with the previous challenges, the agent now must remember - phrases which may have semantically similar meaning and the agent must write the phrases to a file - after seeing several of them. 
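- For example, at level 2 the agent reads two instruction files, each hiding one silly phrase in a block of random noise, and must write both phrases into output.txt.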
- - Args: - workspace (Workspace) - patched_api_requestor (MockerFixture) - monkeypatch (pytest.MonkeyPatch) - level_to_run (int) - """ - silly_phrases = [ - "The purple elephant danced on a rainbow while eating a taco", - "The sneaky toaster stole my socks and ran away to Hawaii", - "My pet rock sings better than Beyoncé on Tuesdays", - "The giant hamster rode a unicycle through the crowded mall", - "The talking tree gave me a high-five and then flew away", - "I have a collection of invisible hats that I wear on special occasions", - "The flying spaghetti monster stole my sandwich and left a note saying 'thanks for the snack'", - "My imaginary friend is a dragon who loves to play video games", - "I once saw a cloud shaped like a giant chicken eating a pizza", - "The ninja unicorn disguised itself as a potted plant and infiltrated the office", - ] - - level_silly_phrases = silly_phrases[:level_to_run] - create_instructions_files( - workspace, - level_to_run, - level_silly_phrases, - ) - - run_challenge( - challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2 - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - content = read_file(file_path, agent=workspace) - for phrase in level_silly_phrases: - assert phrase in content, f"Expected the file to contain {phrase}" - - -def create_instructions_files( - workspace: Workspace, - level: int, - task_ids: list, - base_filename: str = "instructions_", -) -> None: - """ - Creates a series of instructions files for the memory challenge. - Args: - level: - workspace (Workspace) - num_files (int) - task_ids (list) - base_filename (str, optional) - """ - for i in range(1, level + 1): - content = generate_content(i, task_ids, base_filename, level) - file_name = f"{base_filename}{i}.txt" - file_path = get_workspace_path(workspace, file_name) - with open(file_path, "w") as file: - file.write(content) - - -def generate_content( - index: int, silly_phrases: list, base_filename: str, level: int -) -> str: - """ - Args: - index: int - task_ids: list - base_filename: str - num_files: int - - Returns: str - """ - phrase = silly_phrases[index - 1] - noise = generate_noise(NOISE) - if index != level: - if level == 1: - return f"{noise}\nThe current phrase to remember is '{phrase}'.\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. After that, use the task_complete command." - return f"{noise}\nThe current phrase is '{phrase}'.\n{noise}\nRead the file {base_filename}{index + 1}.txt using the read_file command." - return f"{noise}\nThis phrase is '{phrase}'\n{noise}\nWrite all the phrases into the file output.txt. The file has not been created yet. 
After that, use the task_complete command.\n" diff --git a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_d.py b/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_d.py deleted file mode 100644 index 9cd9c8b0..00000000 --- a/autogpts/autogpt/tests/challenges/memory/test_memory_challenge_d.py +++ /dev/null @@ -1,239 +0,0 @@ -import json -from typing import Dict - -import pytest -from pytest_mock import MockerFixture - -from autogpt.commands.file_operations import read_file -from autogpt.workspace import Workspace -from tests.challenges.challenge_decorator.challenge_decorator import challenge -from tests.challenges.utils import get_workspace_path, run_challenge - -LEVEL_CURRENTLY_BEATEN = 1 -MAX_LEVEL = 5 -OUTPUT_LOCATION = "output.txt" -USER_INPUT = "Use the command read_file to read the instructions_1.txt file\nFollow the instructions in the instructions_1.txt file" - - -@challenge() -def test_memory_challenge_d( - patched_api_requestor: MockerFixture, - monkeypatch: pytest.MonkeyPatch, - level_to_run: int, - challenge_name: str, - workspace: Workspace, - patched_make_workspace: pytest.fixture, -) -> None: - """ - The agent is given a series of events and must remember the respective beliefs of the characters. - Args: - workspace (Workspace) - user_selected_level (int) - """ - sally_anne_test_phrases = [ - "Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A).", - "Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speaks to Sally about the marble A as instructed by Anne.", - "Sally gives a new marble (marble C) to Charlie who is outside with her. Charlie enters the room and exchanges marble C with marble B in Anne's basket (basket A). Anne tells Charlie to tell Sally that he put marble C into the red box. Charlie leaves the room and speak to Sally about marble C as instructed by Anne. Meanwhile, after Charlie leaves the room, Bob enters into the room and moves marble A from the green box to under the sofa, but tells Anne to tell Sally that marble A is in the green box. Anne leaves the room and speak to Sally about the marble A as instructed by Bob", - "Sally gives a new marble (marble D) to Anne. Anne gives the marble to Charlie. Charlie enters the room and gives marble D to Bob. Bob tells Charlie to tell Sally that he put marble D under the sofa. Bob put marble D under the sofa Charlie leaves the room and speaks to Sally about marble D. Meanwhile, after Charlie leaves the room, Bob takes marble A from under the sofa and places it in the blue box.", - "Sally gives a new marble (marble E) to Charlie who is outside with her. Charlie enters the room and places marble E in the red box. Anne, who is already in the room, takes marble E from the red box, and hides it under the sofa. Then Anne leaves the room and tells Sally that marble E is in the green box. 
Meanwhile, after Anne leaves the room, Charlie who re-enters the room takes marble D from under the sofa and places it in his own basket (basket C).", - ] - level_sally_anne_test_phrases = sally_anne_test_phrases[:level_to_run] - create_instructions_files(workspace, level_to_run, level_sally_anne_test_phrases) - run_challenge( - challenge_name, level_to_run, monkeypatch, USER_INPUT, level_to_run + 2 - ) - - file_path = get_workspace_path(workspace, OUTPUT_LOCATION) - - content = read_file(file_path, workspace) - check_beliefs(content, level_to_run) - - -def check_beliefs(content: str, level: int) -> None: - # Define the expected beliefs for each level - expected_beliefs = { - 1: { - "Sally": { - "marble A": "basket S", - }, - "Anne": { - "marble A": "basket A", - }, - }, - 2: { - "Sally": { - "marble A": "sofa", # Because Charlie told her - "marble B": "lost", # Because Bob told her - }, - "Anne": { - "marble A": "green box", # Because she moved it there - "marble B": "basket A", # Because Bob put it there and she was in the room - }, - "Bob": { - "marble B": "basket A", # Last place he put it - }, - "Charlie": { - "marble A": "sofa", # Because Anne told him to tell Sally so - }, - }, - 3: { - "Sally": { - "marble A": "green box", # Because Anne told her - "marble C": "red box", # Because Charlie told her - }, - "Anne": { - "marble A": "sofa", # Because Bob moved it there and told her - "marble B": "basket A", # Because Charlie exchanged marble C with marble B in her basket - "marble C": "basket A", # Because Charlie exchanged marble C with marble B in her basket - }, - "Bob": { - "marble A": "sofa", # Because he moved it there - "marble B": "basket A", - # Because Charlie exchanged marble C with marble B in Anne's basket, and he was in the room - "marble C": "basket A", - # Because Charlie exchanged marble C with marble B in Anne's basket, and he was in the room - }, - "Charlie": { - "marble A": "sofa", # Last place he knew it was - "marble B": "basket A", # Because he exchanged marble C with marble B in Anne's basket - "marble C": "red box", # Because Anne told him to tell Sally so - }, - }, - 4: { - "Sally": { - "marble A": "green box", # Because Anne told her in the last conversation - "marble C": "red box", # Because Charlie told her - "marble D": "sofa", # Because Charlie told her - }, - "Anne": { - "marble A": "blue box", # Because Bob moved it there, and she was not in the room to see - "marble B": "basket A", # Last place she knew it was - "marble C": "basket A", # Last place she knew it was - "marble D": "sofa", # Because Bob moved it there, and she was in the room to see - }, - "Bob": { - "marble A": "blue box", # Because he moved it there - "marble B": "basket A", # Last place he knew it was - "marble C": "basket A", # Last place he knew it was - "marble D": "sofa", # Because he moved it there - }, - "Charlie": { - "marble A": "sofa", # Last place he knew it was - "marble B": "basket A", # Last place he knew it was - "marble C": "red box", # Last place he knew it was - "marble D": "sofa", # Because Bob told him to tell Sally so - }, - }, - 5: { - "Sally": { - "marble A": "green box", # Because Anne told her in the last level - "marble C": "red box", # Because Charlie told her - "marble D": "sofa", # Because Charlie told her - "marble E": "green box", # Because Anne told her - }, - "Anne": { - "marble A": "blue box", # Last place she knew it was - "marble B": "basket A", # Last place she knew it was - "marble C": "basket A", # Last place she knew it was - "marble D": "basket C", 
# Last place she knew it was - "marble E": "sofa", # Because she moved it there - }, - "Charlie": { - "marble A": "blue box", # Last place he knew it was - "marble B": "basket A", # Last place he knew it was - "marble C": "basket A", # Last place he knew it was - "marble D": "basket C", # Because he moved it there - "marble E": "red box", # Last place he knew it was - }, - "Bob": { - "marble A": "blue box", # Last place he knew it was - "marble C": "red box", # Last place he knew it was - "marble D": "sofa", # Last place he knew it was - }, - }, - } - - # Extract the beliefs from the AI's response - ai_beliefs = extract_beliefs(content) - # Check the AI's beliefs against the expected beliefs - for character, belief in expected_beliefs[level].items(): - for marble, location in belief.items(): - ai_belief = ai_beliefs.get(character, {}).get(marble, "") - assert ( - location in ai_belief - ), f"For {character}'s {marble}, expected '{location}' to be in '{ai_belief}'" - - -def extract_beliefs(content: str) -> Dict[str, Dict[str, str]]: - """Extract the beliefs of each character from the AI's output.""" - # Parse the JSON content - content_dict = json.loads(content) - beliefs = content_dict.get("beliefs", {}) - return beliefs - - -def create_instructions_files( - workspace: Workspace, - level: int, - test_phrases: list, - base_filename: str = "instructions_", -) -> None: - """ - Creates a series of instructions files for the memory challenge. - Args: - level: - workspace (Workspace) - test_phrases (list) - base_filename (str, optional) - """ - for i in range(1, level + 1): - content = generate_content(i, test_phrases, base_filename, level) - file_name = f"{base_filename}{i}.txt" - file_path = get_workspace_path(workspace, file_name) - with open(file_path, "w") as file: - file.write(content) - - -def generate_content( - index: int, test_phrases: list, base_filename: str, level: int -) -> str: - """ - Args: - index: int - test_phrases: list - base_filename: str - level: int - Returns: str - """ - phrase = test_phrases[index - 1] - - if index != level: - return f"The current event is '{phrase}'.\nRead the file {base_filename}{index + 1}.txt using the read_file command." - else: - return f""" - This event is '{phrase}' - rules: - 1. Everyone inside the room see and know what are the actions of the people inside the room. - 2. Everyone outside the room do not see and do not know the actions of the people inside the room. - 3. Only write about the marbles that are present in the level - 4. Marbles names are marble A, marble B, marble C, marble D, ... - 5. Believed_location_of_the_specific_marble describes where is the marble like drawer, basket S, sofa, ... - 6. Do not use expression like use instead. - 7. All characters believe what they're being told by other characters. - Instructions: - I) Write the following information in the file output.txt in JSON format: - 1. The respective beliefs of the characters (which means where every marble is according to character x, y, z. Character x should say where it believes every marble it is aware exist is) - The format should be as follows: - {{ - "beliefs": {{ - "<character_name>": {{ - "<marble_name>": "<believed_location_of_the_specific_marble>", - ... - }}, - ... - }}, - }} - Example of output (only use this to understand and learn how to use the format. Nothing else): - {{"beliefs": {{"Sally": {{"marble A": "basket A"}}, "Bob": {{"marble B": "basket S"}}, "Anne": {{"marble A": "green box"}}, "Charlie": {{"marble B": "sofa"}}}} - II) The file output.txt has not been created yet. You need to create it.
After that, use the task_complete command. - """ diff --git a/autogpts/autogpt/tests/challenges/schema.py b/autogpts/autogpt/tests/challenges/schema.py deleted file mode 100644 index 9c86772a..00000000 --- a/autogpts/autogpt/tests/challenges/schema.py +++ /dev/null @@ -1,7 +0,0 @@ -from pydantic import BaseModel - - -class Task(BaseModel): - """Jsonifiable representation of a task""" - - user_input: str diff --git a/autogpts/autogpt/tests/challenges/test_challenge_should_be_formatted_properly.py b/autogpts/autogpt/tests/challenges/test_challenge_should_be_formatted_properly.py deleted file mode 100644 index f71bc200..00000000 --- a/autogpts/autogpt/tests/challenges/test_challenge_should_be_formatted_properly.py +++ /dev/null @@ -1,59 +0,0 @@ -import importlib.util -import inspect -import os -from types import ModuleType -from typing import List - -# Path to the challenges folder -CHALLENGES_DIR = os.path.join( - os.path.dirname(os.path.realpath(__file__)), "../challenges" -) - - -def get_python_files(directory: str, exclude_file: str) -> List[str]: - """Recursively get all python files in a directory and subdirectories.""" - python_files: List[str] = [] - for root, dirs, files in os.walk(directory): - for file in files: - if ( - file.endswith(".py") - and file.startswith("test_") - and file != exclude_file - ): - python_files.append(os.path.join(root, file)) - return python_files - - -def load_module_from_file(test_file: str) -> ModuleType: - spec = importlib.util.spec_from_file_location("module.name", test_file) - assert spec is not None, f"Unable to get spec for module in file {test_file}" - module = importlib.util.module_from_spec(spec) - assert ( - spec.loader is not None - ), f"Unable to get loader for module in file {test_file}" - spec.loader.exec_module(module) - return module - - -def get_test_functions(module: ModuleType) -> List: - return [ - o - for o in inspect.getmembers(module) - if inspect.isfunction(o[1]) and o[0].startswith("test_") - ] - - -def assert_single_test_function(functions_list: List, test_file: str) -> None: - assert len(functions_list) == 1, f"{test_file} should contain only one function" - assert ( - functions_list[0][0][5:] == os.path.basename(test_file)[5:-3] - ), f"The function in {test_file} should have the same name as the file without 'test_' prefix" - - -def test_method_name_and_count() -> None: - current_file: str = os.path.basename(__file__) - test_files: List[str] = get_python_files(CHALLENGES_DIR, current_file) - for test_file in test_files: - module = load_module_from_file(test_file) - functions_list = get_test_functions(module) - assert_single_test_function(functions_list, test_file) diff --git a/autogpts/autogpt/tests/challenges/utils.py b/autogpts/autogpt/tests/challenges/utils.py deleted file mode 100644 index 883f3f11..00000000 --- a/autogpts/autogpt/tests/challenges/utils.py +++ /dev/null @@ -1,81 +0,0 @@ -import contextlib -import random -import shutil -from pathlib import Path -from typing import Any, AsyncIterator - -import pytest - -from agbenchmark_config.benchmarks import run_specific_agent -from autogpt.logs import LogCycleHandler -from autogpt.workspace import Workspace -from tests.challenges.schema import Task - - -def generate_noise(noise_size: int) -> str: - random.seed(42) - return "".join( - random.choices( - "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789", - k=noise_size, - ) - ) - - -def setup_mock_input(monkeypatch: pytest.MonkeyPatch, cycle_count: int) -> None: - """ - Sets up the mock input for testing. 
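- For example, cycle_count=2 yields the input sequence ["y", "y", "EXIT"]: approve two agent cycles, then exit.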
- - :param monkeypatch: pytest's monkeypatch utility for modifying builtins. - :param cycle_count: The number of cycles to mock. - """ - input_sequence = ["y"] * (cycle_count) + ["EXIT"] - - async def input_generator() -> AsyncIterator[str]: - """ - Creates a generator that yields input strings from the given sequence. - """ - for input in input_sequence: - yield input - - gen = input_generator() - monkeypatch.setattr( - "autogpt.app.utils.session.prompt_async", lambda _, **kwargs: anext(gen) - ) - - -def setup_mock_log_cycle_agent_name( - monkeypatch: pytest.MonkeyPatch, challenge_name: str, level_to_run: int -) -> None: - def mock_get_agent_short_name(*args: Any, **kwargs: Any) -> str: - return f"{challenge_name}_level_{level_to_run}" - - monkeypatch.setattr( - LogCycleHandler, "get_agent_short_name", mock_get_agent_short_name - ) - - -def get_workspace_path(workspace: Workspace, file_name: str) -> str: - return str(workspace.get_path(file_name)) - - -def copy_file_into_workspace( - workspace: Workspace, directory_path: Path, file_path: str -) -> None: - workspace_code_file_path = get_workspace_path(workspace, file_path) - code_file_path = directory_path / file_path - shutil.copy(code_file_path, workspace_code_file_path) - - -def run_challenge( - challenge_name: str, - level_to_run: int, - monkeypatch: pytest.MonkeyPatch, - user_input: str, - cycle_count: int, -) -> None: - setup_mock_input(monkeypatch, cycle_count) - setup_mock_log_cycle_agent_name(monkeypatch, challenge_name, level_to_run) - task = Task(user_input=user_input) - with contextlib.suppress(SystemExit): - run_specific_agent(task.user_input) diff --git a/autogpts/autogpt/tests/challenges/utils/build_current_score.py b/autogpts/autogpt/tests/challenges/utils/build_current_score.py deleted file mode 100644 index b8e75242..00000000 --- a/autogpts/autogpt/tests/challenges/utils/build_current_score.py +++ /dev/null @@ -1,44 +0,0 @@ -import glob -import json -import os -from typing import Any, Dict - - -def deep_merge(source: Dict[Any, Any], dest: Dict[Any, Any]) -> Dict[Any, Any]: - for key, value in source.items(): - if isinstance(value, Dict): - dest[key] = deep_merge(value, dest.get(key, {})) - else: - dest[key] = value - return dest - - -import collections - - -def recursive_sort_dict(data: dict) -> dict: - for key, value in data.items(): - if isinstance(value, dict): - data[key] = recursive_sort_dict(value) - return collections.OrderedDict(sorted(data.items())) - - # setup - - -cwd = os.getcwd() # get current working directory -new_score_filename_pattern = os.path.join(cwd, "tests/challenges/new_score_*.json") -current_score_filename = os.path.join(cwd, "tests/challenges/current_score.json") - -merged_data: Dict[str, Any] = {} -for filename in glob.glob(new_score_filename_pattern): - with open(filename, "r") as f_new: - data = json.load(f_new) - merged_data = deep_merge( - data, merged_data - ) # deep merge the new data with the merged data - os.remove(filename) # remove the individual file -sorted_data = recursive_sort_dict(merged_data) - -with open(current_score_filename, "w") as f_current: - json_data = json.dumps(sorted_data, indent=4) - f_current.write(json_data + "\n") diff --git a/autogpts/autogpt/tests/conftest.py b/autogpts/autogpt/tests/conftest.py index b0c84c82..460c0a66 100644 --- a/autogpts/autogpt/tests/conftest.py +++ b/autogpts/autogpt/tests/conftest.py @@ -8,13 +8,13 @@ from pytest_mock import MockerFixture from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings from autogpt.app.main 
import _configure_openai_provider -from autogpt.config import AIConfig, Config, ConfigBuilder +from autogpt.config import AIProfile, Config, ConfigBuilder from autogpt.core.resource.model_providers import ChatModelProvider, OpenAIProvider +from autogpt.file_workspace import FileWorkspace from autogpt.llm.api_manager import ApiManager from autogpt.logs.config import configure_logging from autogpt.memory.vector import get_memory from autogpt.models.command_registry import CommandRegistry -from autogpt.workspace import Workspace pytest_plugins = [ "tests.integration.agent_factory", @@ -24,21 +24,37 @@ pytest_plugins = [ @pytest.fixture() -def workspace_root(tmp_path: Path) -> Path: - return tmp_path / "home/users/monty/auto_gpt_workspace" +def tmp_project_root(tmp_path: Path) -> Path: + return tmp_path @pytest.fixture() -def workspace(workspace_root: Path) -> Workspace: - workspace_root = Workspace.make_workspace(workspace_root) - return Workspace(workspace_root, restrict_to_workspace=True) +def app_data_dir(tmp_project_root: Path) -> Path: + return tmp_project_root / "data" + + +@pytest.fixture() +def agent_data_dir(app_data_dir: Path) -> Path: + return app_data_dir / "agents/AutoGPT" + + +@pytest.fixture() +def workspace_root(agent_data_dir: Path) -> Path: + return agent_data_dir / "workspace" + + +@pytest.fixture() +def workspace(workspace_root: Path) -> FileWorkspace: + workspace = FileWorkspace(workspace_root, restrict_to_root=True) + workspace.initialize() + return workspace @pytest.fixture def temp_plugins_config_file(): """Create a plugins_config.yaml file in a temp directory so that it doesn't mess with existing ones""" config_directory = TemporaryDirectory() - config_file = os.path.join(config_directory.name, "plugins_config.yaml") + config_file = Path(config_directory.name) / "plugins_config.yaml" with open(config_file, "w+") as f: f.write(yaml.dump({})) @@ -46,12 +62,17 @@ def temp_plugins_config_file(): @pytest.fixture() -def config(temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace): - config = ConfigBuilder.build_config_from_env(workspace.root.parent) +def config( + temp_plugins_config_file: Path, + tmp_project_root: Path, + app_data_dir: Path, + mocker: MockerFixture, +): + config = ConfigBuilder.build_config_from_env(project_root=tmp_project_root) if not os.environ.get("OPENAI_API_KEY"): os.environ["OPENAI_API_KEY"] = "sk-dummy" - config.workspace_path = workspace.root + config.app_data_dir = app_data_dir config.plugins_dir = "tests/unit/data/test_plugins" config.plugins_config_file = temp_plugins_config_file @@ -63,23 +84,20 @@ def config(temp_plugins_config_file: str, mocker: MockerFixture, workspace: Work from autogpt.plugins.plugins_config import PluginsConfig config.plugins_config = PluginsConfig.load_config( - plugins_config_file=config.workdir / config.plugins_config_file, + plugins_config_file=config.plugins_config_file, plugins_denylist=config.plugins_denylist, plugins_allowlist=config.plugins_allowlist, ) - - # Do a little setup and teardown since the config object is a singleton - mocker.patch.multiple( - config, - workspace_path=workspace.root, - file_logger_path=workspace.get_path("file_logger.log"), - ) yield config @pytest.fixture(scope="session") def setup_logger(config: Config): - configure_logging(config, Path(__file__).parent / "logs") + configure_logging( + debug_mode=config.debug_mode, + plain_output=config.plain_output, + log_dir=Path(__file__).parent / "logs", + ) @pytest.fixture() @@ -95,17 +113,16 @@ def llm_provider(config: 
diff --git a/autogpts/autogpt/tests/integration/agent_factory.py b/autogpts/autogpt/tests/integration/agent_factory.py
index dd3abe94..dfff73b9 100644
--- a/autogpts/autogpt/tests/integration/agent_factory.py
+++ b/autogpts/autogpt/tests/integration/agent_factory.py
@@ -1,7 +1,7 @@
 import pytest
 
 from autogpt.agents.agent import Agent, AgentConfiguration, AgentSettings
-from autogpt.config import AIConfig, Config
+from autogpt.config import AIProfile, Config
 from autogpt.memory.vector import get_memory
 from autogpt.models.command_registry import CommandRegistry
 
@@ -22,7 +22,7 @@ def memory_json_file(config: Config):
 def dummy_agent(config: Config, llm_provider, memory_json_file):
     command_registry = CommandRegistry()
 
-    ai_config = AIConfig(
+    ai_profile = AIProfile(
         ai_name="Dummy Agent",
         ai_role="Dummy Role",
         ai_goals=[
@@ -35,7 +35,7 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
     agent_settings = AgentSettings(
         name=Agent.default_settings.name,
         description=Agent.default_settings.description,
-        ai_config=ai_config,
+        ai_profile=ai_profile,
         config=AgentConfiguration(
             fast_llm=config.fast_llm,
             smart_llm=config.smart_llm,
@@ -50,7 +50,6 @@ def dummy_agent(config: Config, llm_provider, memory_json_file):
         settings=agent_settings,
         llm_provider=llm_provider,
         command_registry=command_registry,
-        memory=memory_json_file,
         legacy_config=config,
     )
diff --git a/autogpts/autogpt/tests/integration/memory/test_json_file_memory.py b/autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py
similarity index 92%
rename from autogpts/autogpt/tests/integration/memory/test_json_file_memory.py
rename to autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py
index 76f867e2..d8e82c69 100644
--- a/autogpts/autogpt/tests/integration/memory/test_json_file_memory.py
+++ b/autogpts/autogpt/tests/integration/memory/_test_json_file_memory.py
@@ -4,11 +4,13 @@ import orjson
 import pytest
 
 from autogpt.config import Config
+from autogpt.file_workspace import FileWorkspace
 from autogpt.memory.vector import JSONFileMemory, MemoryItem
-from autogpt.workspace import Workspace
 
 
-def test_json_memory_init_without_backing_file(config: Config, workspace: Workspace):
+def test_json_memory_init_without_backing_file(
+    config: Config, workspace: FileWorkspace
+):
     index_file = workspace.root / f"{config.memory_index}.json"
     assert not index_file.exists()
 
@@ -17,7 +19,9 @@ def test_json_memory_init_without_backing_file(config: Config, workspace: Worksp
     assert index_file.read_text() == "[]"
 
 
-def test_json_memory_init_with_backing_empty_file(config: Config, workspace: Workspace):
+def test_json_memory_init_with_backing_empty_file(
+    config: Config, workspace: FileWorkspace
+):
     index_file = workspace.root / f"{config.memory_index}.json"
     index_file.touch()
 
@@ -28,7 +32,7 @@ def test_json_memory_init_with_backing_empty_file(config: Config, workspace: Wor
 
 
 def test_json_memory_init_with_backing_invalid_file(
-    config: Config, workspace: Workspace
+    config: Config, workspace: FileWorkspace
 ):
     index_file = workspace.root / f"{config.memory_index}.json"
     index_file.touch()
diff --git a/autogpts/autogpt/tests/integration/test_execute_code.py b/autogpts/autogpt/tests/integration/test_execute_code.py
index 3049d01e..cc281a0d 100644
--- a/autogpts/autogpt/tests/integration/test_execute_code.py
+++ b/autogpts/autogpt/tests/integration/test_execute_code.py
@@ -11,7 +11,6 @@ from autogpt.agents.utils.exceptions import (
     InvalidArgumentError,
     OperationNotAllowedError,
 )
-from autogpt.config import Config
 
 
 @pytest.fixture
@@ -20,8 +19,8 @@ def random_code(random_string) -> str:
 
 
 @pytest.fixture
-def python_test_file(config: Config, random_code: str):
-    temp_file = tempfile.NamedTemporaryFile(dir=config.workspace_path, suffix=".py")
+def python_test_file(agent: Agent, random_code: str):
+    temp_file = tempfile.NamedTemporaryFile(dir=agent.workspace.root, suffix=".py")
     temp_file.write(str.encode(random_code))
     temp_file.flush()
 
@@ -30,8 +29,8 @@ def python_test_file(config: Config, random_code: str):
 
 
 @pytest.fixture
-def python_test_args_file(config: Config):
-    temp_file = tempfile.NamedTemporaryFile(dir=config.workspace_path, suffix=".py")
+def python_test_args_file(agent: Agent):
+    temp_file = tempfile.NamedTemporaryFile(dir=agent.workspace.root, suffix=".py")
     temp_file.write(str.encode("import sys\nprint(sys.argv[1], sys.argv[2])"))
     temp_file.flush()
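In the updated `test_execute_code.py` fixtures, temp files are created with `dir=agent.workspace.root` instead of a config path. The point of anchoring them inside the workspace is that workspace-restricted file access (note the `OperationNotAllowedError` import) is assumed to reject files outside the root; a standalone sketch:

```python
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as ws:
    workspace_root = Path(ws)  # stand-in for agent.workspace.root
    # A module created inside the workspace root stays reachable under
    # restricted file access; anywhere else is assumed to be rejected.
    with tempfile.NamedTemporaryFile(dir=workspace_root, suffix=".py") as f:
        f.write(b"print('hello')")
        f.flush()
        assert Path(f.name).is_relative_to(workspace_root)
```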
diff --git a/autogpts/autogpt/tests/integration/test_setup.py b/autogpts/autogpt/tests/integration/test_setup.py
index 91a39dc1..3c66e257 100644
--- a/autogpts/autogpt/tests/integration/test_setup.py
+++ b/autogpts/autogpt/tests/integration/test_setup.py
@@ -2,78 +2,69 @@ from unittest.mock import patch
 
 import pytest
 
-from autogpt.app.setup import generate_aiconfig_automatic, interactive_ai_config_setup
-from autogpt.config.ai_config import AIConfig
+from autogpt.app.setup import (
+    apply_overrides_to_ai_settings,
+    interactively_revise_ai_settings,
+)
+from autogpt.config import AIDirectives, Config
+from autogpt.config.ai_profile import AIProfile
 
 
-@pytest.mark.vcr
-@pytest.mark.requires_openai_api_key
-async def test_generate_aiconfig_automatic_default(
-    patched_api_requestor, config, llm_provider
-):
-    user_inputs = [""]
-    with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
-        ai_config = await interactive_ai_config_setup(config, llm_provider)
+@pytest.mark.asyncio
+async def test_apply_overrides_to_ai_settings():
+    ai_profile = AIProfile(ai_name="Test AI", ai_role="Test Role")
+    directives = AIDirectives(
+        resources=["Resource1"],
+        constraints=["Constraint1"],
+        best_practices=["BestPractice1"],
+    )
 
-    assert isinstance(ai_config, AIConfig)
-    assert ai_config.ai_name is not None
-    assert ai_config.ai_role is not None
-    assert 1 <= len(ai_config.ai_goals) <= 5
+    apply_overrides_to_ai_settings(
+        ai_profile,
+        directives,
+        override_name="New AI",
+        override_role="New Role",
+        replace_directives=True,
+        resources=["NewResource"],
+        constraints=["NewConstraint"],
+        best_practices=["NewBestPractice"],
+    )
+
+    assert ai_profile.ai_name == "New AI"
+    assert ai_profile.ai_role == "New Role"
+    assert directives.resources == ["NewResource"]
+    assert directives.constraints == ["NewConstraint"]
+    assert directives.best_practices == ["NewBestPractice"]
 
 
-@pytest.mark.vcr
-@pytest.mark.requires_openai_api_key
-async def test_generate_aiconfig_automatic_typical(
-    patched_api_requestor, config, llm_provider
-):
-    user_prompt = "Help me create a rock opera about cybernetic giraffes"
-    ai_config = await generate_aiconfig_automatic(user_prompt, config, llm_provider)
+@pytest.mark.asyncio
+async def test_interactively_revise_ai_settings(config: Config):
+    ai_profile = AIProfile(ai_name="Test AI", ai_role="Test Role")
+    directives = AIDirectives(
+        resources=["Resource1"],
+        constraints=["Constraint1"],
+        best_practices=["BestPractice1"],
+    )
 
-    assert isinstance(ai_config, AIConfig)
-    assert ai_config.ai_name is not None
-    assert ai_config.ai_role is not None
-    assert 1 <= len(ai_config.ai_goals) <= 5
-
-
-@pytest.mark.vcr
-@pytest.mark.requires_openai_api_key
-async def test_generate_aiconfig_automatic_fallback(
-    patched_api_requestor, config, llm_provider
-):
     user_inputs = [
-        "T&GF£OIBECC()!*",
-        "Chef-GPT",
-        "an AI designed to browse bake a cake.",
-        "Purchase ingredients",
-        "Bake a cake",
+        "n",
+        "New AI",
+        "New Role",
+        "NewConstraint",
         "",
+        "NewResource",
         "",
+        "NewBestPractice",
+        "",
+        "y",
     ]
-    with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
-        ai_config = await interactive_ai_config_setup(config, llm_provider)
+    with patch("autogpt.app.setup.clean_input", side_effect=user_inputs):
+        ai_profile, directives = await interactively_revise_ai_settings(
+            ai_profile, directives, config
+        )
 
-    assert isinstance(ai_config, AIConfig)
-    assert ai_config.ai_name == "Chef-GPT"
-    assert ai_config.ai_role == "an AI designed to browse bake a cake."
-    assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"]
-
-
-@pytest.mark.vcr
-@pytest.mark.requires_openai_api_key
-async def test_prompt_user_manual_mode(patched_api_requestor, config, llm_provider):
-    user_inputs = [
-        "--manual",
-        "Chef-GPT",
-        "an AI designed to browse bake a cake.",
-        "Purchase ingredients",
-        "Bake a cake",
-        "",
-        "",
-    ]
-    with patch("autogpt.app.utils.session.prompt", side_effect=user_inputs):
-        ai_config = await interactive_ai_config_setup(config, llm_provider)
-
-    assert isinstance(ai_config, AIConfig)
-    assert ai_config.ai_name == "Chef-GPT"
-    assert ai_config.ai_role == "an AI designed to browse bake a cake."
-    assert ai_config.ai_goals == ["Purchase ingredients", "Bake a cake"]
+    assert ai_profile.ai_name == "New AI"
+    assert ai_profile.ai_role == "New Role"
+    assert directives.resources == ["NewResource"]
+    assert directives.constraints == ["NewConstraint"]
+    assert directives.best_practices == ["NewBestPractice"]
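The revised setup test scripts the whole interactive loop through `patch(..., side_effect=user_inputs)`: each call to the patched `clean_input` consumes the next canned answer. The same pattern in isolation, with a hypothetical `ask` helper:

```python
from unittest.mock import patch

def ask(prompt: str) -> str:  # stand-in for the patched input function
    raise RuntimeError("patched in tests")

def collect(prompt: str) -> list[str]:
    items = []
    while (answer := ask(prompt)) != "":
        items.append(answer)
    return items

# side_effect hands out one element per call, which is exactly how the
# user_inputs list above drives the revision loop.
with patch(f"{__name__}.ask", side_effect=["NewConstraint", ""]):
    assert collect("Add a constraint (empty to finish): ") == ["NewConstraint"]
```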
diff --git a/autogpts/autogpt/tests/unit/test_ai_config.py b/autogpts/autogpt/tests/unit/test_ai_config.py
deleted file mode 100644
index 6c999c2d..00000000
--- a/autogpts/autogpt/tests/unit/test_ai_config.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from autogpt.config.ai_config import AIConfig
-
-"""
-Test cases for the AIConfig class, which handles loads the AI configuration
-settings from a YAML file.
-"""
-
-
-def test_goals_are_always_lists_of_strings(tmp_path):
-    """Test if the goals attribute is always a list of strings."""
-
-    yaml_content = """
-ai_goals:
-- Goal 1: Make a sandwich
-- Goal 2, Eat the sandwich
-- Goal 3 - Go to sleep
-- "Goal 4: Wake up"
-ai_name: McFamished
-ai_role: A hungry AI
-api_budget: 0.0
-"""
-    ai_settings_file = tmp_path / "ai_settings.yaml"
-    ai_settings_file.write_text(yaml_content)
-
-    ai_config = AIConfig.load(ai_settings_file)
-
-    assert len(ai_config.ai_goals) == 4
-    assert ai_config.ai_goals[0] == "Goal 1: Make a sandwich"
-    assert ai_config.ai_goals[1] == "Goal 2, Eat the sandwich"
-    assert ai_config.ai_goals[2] == "Goal 3 - Go to sleep"
-    assert ai_config.ai_goals[3] == "Goal 4: Wake up"
-
-    ai_settings_file.write_text("")
-    ai_config.save(ai_settings_file)
-
-    yaml_content2 = """ai_goals:
-- 'Goal 1: Make a sandwich'
-- Goal 2, Eat the sandwich
-- Goal 3 - Go to sleep
-- 'Goal 4: Wake up'
-ai_name: McFamished
-ai_role: A hungry AI
-api_budget: 0.0
-"""
-    assert ai_settings_file.read_text() == yaml_content2
-
-
-def test_ai_config_file_not_exists(workspace):
-    """Test if file does not exist."""
-
-    ai_settings_file = workspace.get_path("ai_settings.yaml")
-
-    ai_config = AIConfig.load(str(ai_settings_file))
-    assert ai_config.ai_name == ""
-    assert ai_config.ai_role == ""
-    assert ai_config.ai_goals == []
-    assert ai_config.api_budget == 0.0
-
-
-def test_ai_config_file_is_empty(workspace):
-    """Test if file does not exist."""
-
-    ai_settings_file = workspace.get_path("ai_settings.yaml")
-    ai_settings_file.write_text("")
-
-    ai_config = AIConfig.load(str(ai_settings_file))
-    assert ai_config.ai_name == ""
-    assert ai_config.ai_role == ""
-    assert ai_config.ai_goals == []
-    assert ai_config.api_budget == 0.0
+""" + + +def test_goals_are_always_lists_of_strings(tmp_path): + """Test if the goals attribute is always a list of strings.""" + + yaml_content = """ +ai_goals: +- Goal 1: Make a sandwich +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- "Goal 4: Wake up" +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + ai_settings_file = tmp_path / "ai_settings.yaml" + ai_settings_file.write_text(yaml_content) + + ai_profile = AIProfile.load(ai_settings_file) + + assert len(ai_profile.ai_goals) == 4 + assert ai_profile.ai_goals[0] == "Goal 1: Make a sandwich" + assert ai_profile.ai_goals[1] == "Goal 2, Eat the sandwich" + assert ai_profile.ai_goals[2] == "Goal 3 - Go to sleep" + assert ai_profile.ai_goals[3] == "Goal 4: Wake up" + + ai_settings_file.write_text("") + ai_profile.save(ai_settings_file) + + yaml_content2 = """ai_goals: +- 'Goal 1: Make a sandwich' +- Goal 2, Eat the sandwich +- Goal 3 - Go to sleep +- 'Goal 4: Wake up' +ai_name: McFamished +ai_role: A hungry AI +api_budget: 0.0 +""" + assert ai_settings_file.read_text() == yaml_content2 + + +def test_ai_profile_file_not_exists(workspace): + """Test if file does not exist.""" + + ai_settings_file = workspace.get_path("ai_settings.yaml") + + ai_profile = AIProfile.load(str(ai_settings_file)) + assert ai_profile.ai_name == "" + assert ai_profile.ai_role == "" + assert ai_profile.ai_goals == [] + assert ai_profile.api_budget == 0.0 + + +def test_ai_profile_file_is_empty(workspace): + """Test if file does not exist.""" + + ai_settings_file = workspace.get_path("ai_settings.yaml") + ai_settings_file.write_text("") + + ai_profile = AIProfile.load(str(ai_settings_file)) + assert ai_profile.ai_name == "" + assert ai_profile.ai_role == "" + assert ai_profile.ai_goals == [] + assert ai_profile.api_budget == 0.0 diff --git a/autogpts/autogpt/tests/unit/test_config.py b/autogpts/autogpt/tests/unit/test_config.py index b851d559..8fc85262 100644 --- a/autogpts/autogpt/tests/unit/test_config.py +++ b/autogpts/autogpt/tests/unit/test_config.py @@ -9,18 +9,18 @@ from unittest.mock import patch import pytest -from autogpt.app.configurator import GPT_3_MODEL, GPT_4_MODEL, create_config +from autogpt.app.configurator import GPT_3_MODEL, GPT_4_MODEL, apply_overrides_to_config from autogpt.config import Config, ConfigBuilder -from autogpt.workspace.workspace import Workspace +from autogpt.file_workspace import FileWorkspace def test_initial_values(config: Config) -> None: """ Test if the initial values of the config class attributes are set correctly. """ - assert config.debug_mode == False - assert config.continuous_mode == False - assert config.speak_mode == False + assert config.debug_mode is False + assert config.continuous_mode is False + assert config.tts_config.speak_mode is False assert config.fast_llm == "gpt-3.5-turbo-16k" assert config.smart_llm == "gpt-4-0314" @@ -33,7 +33,7 @@ def test_set_continuous_mode(config: Config) -> None: continuous_mode = config.continuous_mode config.continuous_mode = True - assert config.continuous_mode == True + assert config.continuous_mode is True # Reset continuous mode config.continuous_mode = continuous_mode @@ -44,13 +44,13 @@ def test_set_speak_mode(config: Config) -> None: Test if the set_speak_mode() method updates the speak_mode attribute. 
""" # Store speak mode to reset it after the test - speak_mode = config.speak_mode + speak_mode = config.tts_config.speak_mode - config.speak_mode = True - assert config.speak_mode == True + config.tts_config.speak_mode = True + assert config.tts_config.speak_mode is True # Reset speak mode - config.speak_mode = speak_mode + config.tts_config.speak_mode = speak_mode def test_set_fast_llm(config: Config) -> None: @@ -89,7 +89,7 @@ def test_set_debug_mode(config: Config) -> None: debug_mode = config.debug_mode config.debug_mode = True - assert config.debug_mode == True + assert config.debug_mode is True # Reset debug mode config.debug_mode = debug_mode @@ -98,7 +98,7 @@ def test_set_debug_mode(config: Config) -> None: @patch("openai.Model.list") def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config) -> None: """ - Test if models update to gpt-3.5-turbo if both are set to gpt-4. + Test if models update to gpt-3.5-turbo if gpt-4 is not available. """ fast_llm = config.fast_llm smart_llm = config.smart_llm @@ -108,21 +108,10 @@ def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config) mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]} - create_config( + apply_overrides_to_config( config=config, - continuous=False, - continuous_limit=False, - ai_settings_file="", - prompt_settings_file="", - skip_reprompt=False, - speak=False, - debug=False, gpt3only=False, gpt4only=False, - memory_type="", - browser_name="", - allow_downloads=False, - skip_news=False, ) assert config.fast_llm == "gpt-3.5-turbo" @@ -133,13 +122,13 @@ def test_smart_and_fast_llms_set_to_gpt4(mock_list_models: Any, config: Config) config.smart_llm = smart_llm -def test_missing_azure_config(workspace: Workspace) -> None: +def test_missing_azure_config(workspace: FileWorkspace) -> None: config_file = workspace.get_path("azure_config.yaml") with pytest.raises(FileNotFoundError): - ConfigBuilder.load_azure_config(str(config_file)) + ConfigBuilder.load_azure_config(config_file) config_file.write_text("") - azure_config = ConfigBuilder.load_azure_config(str(config_file)) + azure_config = ConfigBuilder.load_azure_config(config_file) assert azure_config["openai_api_type"] == "azure" assert azure_config["openai_api_base"] == "" @@ -147,9 +136,9 @@ def test_missing_azure_config(workspace: Workspace) -> None: assert azure_config["azure_model_to_deployment_id_map"] == {} -def test_azure_config(config: Config, workspace: Workspace) -> None: +def test_azure_config(config: Config, workspace: FileWorkspace) -> None: config_file = workspace.get_path("azure_config.yaml") - yaml_content = f""" + yaml_content = """ azure_api_type: azure azure_api_base: https://dummy.openai.azure.com azure_api_version: 2023-06-01-preview @@ -162,7 +151,7 @@ azure_model_map: os.environ["USE_AZURE"] = "True" os.environ["AZURE_CONFIG_FILE"] = str(config_file) - config = ConfigBuilder.build_config_from_env(workspace.root.parent) + config = ConfigBuilder.build_config_from_env(project_root=workspace.root.parent) assert config.openai_api_type == "azure" assert config.openai_api_base == "https://dummy.openai.azure.com" @@ -209,21 +198,9 @@ azure_model_map: def test_create_config_gpt4only(config: Config) -> None: with mock.patch("autogpt.llm.api_manager.ApiManager.get_models") as mock_get_models: mock_get_models.return_value = [{"id": GPT_4_MODEL}] - create_config( + apply_overrides_to_config( config=config, - continuous=False, - continuous_limit=None, - ai_settings_file=None, - prompt_settings_file=None, - 
diff --git a/autogpts/autogpt/tests/unit/test_file_operations.py b/autogpts/autogpt/tests/unit/test_file_operations.py
index 75b5c588..3636ce80 100644
--- a/autogpts/autogpt/tests/unit/test_file_operations.py
+++ b/autogpts/autogpt/tests/unit/test_file_operations.py
@@ -15,9 +15,9 @@ import autogpt.commands.file_operations as file_ops
 from autogpt.agents.agent import Agent
 from autogpt.agents.utils.exceptions import DuplicateOperationError
 from autogpt.config import Config
+from autogpt.file_workspace import FileWorkspace
 from autogpt.memory.vector.memory_item import MemoryItem
 from autogpt.memory.vector.utils import Embedding
-from autogpt.workspace import Workspace
 
 
 @pytest.fixture()
@@ -50,7 +50,7 @@ def test_file_name():
 
 
 @pytest.fixture
-def test_file_path(test_file_name: Path, workspace: Workspace):
+def test_file_path(test_file_name: Path, workspace: FileWorkspace):
     return workspace.get_path(test_file_name)
 
 
@@ -73,12 +73,12 @@ def test_file_with_content_path(test_file: TextIOWrapper, file_content, agent: A
 
 
 @pytest.fixture()
-def test_directory(workspace: Workspace):
+def test_directory(workspace: FileWorkspace):
     return workspace.get_path("test_directory")
 
 
 @pytest.fixture()
-def test_nested_file(workspace: Workspace):
+def test_nested_file(workspace: FileWorkspace):
     return workspace.get_path("nested/test_file.txt")
 
 
@@ -169,7 +169,7 @@ def test_is_duplicate_operation(agent: Agent, mocker: MockerFixture):
 # Test logging a file operation
 def test_log_operation(agent: Agent):
     file_ops.log_operation("log_test", "path/to/test", agent=agent)
-    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
+    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
         content = f.read()
     assert f"log_test: path/to/test\n" in content
 
@@ -183,7 +183,7 @@ def test_text_checksum(file_content: str):
 
 def test_log_operation_with_checksum(agent: Agent):
     file_ops.log_operation("log_test", "path/to/test", agent=agent, checksum="ABCDEF")
-    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
+    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
         content = f.read()
     assert f"log_test: path/to/test #ABCDEF\n" in content
 
@@ -224,7 +224,7 @@ def test_write_file_logs_checksum(test_file_name: Path, agent: Agent):
     new_content = "This is new content.\n"
     new_checksum = file_ops.text_checksum(new_content)
     file_ops.write_to_file(str(test_file_name), new_content, agent=agent)
-    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
+    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
         log_entry = f.read()
     assert log_entry == f"write: {test_file_name} #{new_checksum}\n"
 
@@ -264,9 +264,17 @@ def test_append_to_file_uses_checksum_from_appended_file(
     test_file_name: Path, agent: Agent
 ):
     append_text = "This is appended text.\n"
-    file_ops.append_to_file(test_file_name, append_text, agent=agent)
-    file_ops.append_to_file(test_file_name, append_text, agent=agent)
-    with open(agent.legacy_config.file_logger_path, "r", encoding="utf-8") as f:
+    file_ops.append_to_file(
+        agent.workspace.get_path(test_file_name),
+        append_text,
+        agent=agent,
+    )
+    file_ops.append_to_file(
+        agent.workspace.get_path(test_file_name),
+        append_text,
+        agent=agent,
+    )
+    with open(agent.file_manager.file_ops_log_path, "r", encoding="utf-8") as f:
         log_contents = f.read()
 
     digest = hashlib.md5()
@@ -280,7 +288,7 @@ def test_append_to_file_uses_checksum_from_appended_file(
     )
 
 
-def test_list_files(workspace: Workspace, test_directory: Path, agent: Agent):
+def test_list_files(workspace: FileWorkspace, test_directory: Path, agent: Agent):
     # Case 1: Create files A and B, search for A, and ensure we don't return A and B
     file_a = workspace.get_path("file_a.txt")
     file_b = workspace.get_path("file_b.txt")
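The file-operations tests read back a log whose entries take the shape `<operation>: <path> #<checksum>`. Judging by the append test's use of `hashlib.md5`, `text_checksum` is assumed to be an MD5 hex digest of the written text:

```python
import hashlib

def text_checksum(text: str) -> str:
    # Assumed from the md5 usage in the append test; the real helper is
    # file_ops.text_checksum in autogpt.commands.file_operations.
    return hashlib.md5(text.encode("utf-8")).hexdigest()

content = "This is appended text.\n"
log_entry = f"append: example.txt #{text_checksum(content)}\n"
assert len(text_checksum(content)) == 32  # an MD5 hex digest is 32 chars
print(log_entry, end="")
```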
diff --git a/autogpts/autogpt/tests/unit/test_plugins.py b/autogpts/autogpt/tests/unit/test_plugins.py
index 7dc79e27..981715ac 100644
--- a/autogpts/autogpt/tests/unit/test_plugins.py
+++ b/autogpts/autogpt/tests/unit/test_plugins.py
@@ -71,7 +71,7 @@ def test_create_base_config(config: Config):
     os.remove(config.plugins_config_file)
 
     plugins_config = PluginsConfig.load_config(
-        plugins_config_file=config.workdir / config.plugins_config_file,
+        plugins_config_file=config.plugins_config_file,
         plugins_denylist=config.plugins_denylist,
         plugins_allowlist=config.plugins_allowlist,
     )
@@ -107,7 +107,7 @@ def test_load_config(config: Config):
 
     # Load the config from disk
     plugins_config = PluginsConfig.load_config(
-        plugins_config_file=config.workdir / config.plugins_config_file,
+        plugins_config_file=config.plugins_config_file,
         plugins_denylist=config.plugins_denylist,
         plugins_allowlist=config.plugins_allowlist,
     )
diff --git a/autogpts/autogpt/tests/unit/test_web_search.py b/autogpts/autogpt/tests/unit/test_web_search.py
index 7b57b9fa..a8ccb0ce 100644
--- a/autogpts/autogpt/tests/unit/test_web_search.py
+++ b/autogpts/autogpt/tests/unit/test_web_search.py
@@ -29,8 +29,8 @@ def test_safe_google_results_invalid_input():
         (
             "test",
             1,
-            '[\n    {\n        "title": "Result 1",\n        "link": "https://example.com/result1"\n    }\n]',
-            [{"title": "Result 1", "link": "https://example.com/result1"}],
+            '[\n    {\n        "title": "Result 1",\n        "url": "https://example.com/result1"\n    }\n]',
+            [{"title": "Result 1", "href": "https://example.com/result1"}],
         ),
         ("", 1, "[]", []),
         ("no results", 1, "[]", []),
diff --git a/autogpts/autogpt/tests/unit/test_workspace.py b/autogpts/autogpt/tests/unit/test_workspace.py
index fbe14d8c..58cad459 100644
--- a/autogpts/autogpt/tests/unit/test_workspace.py
+++ b/autogpts/autogpt/tests/unit/test_workspace.py
@@ -3,7 +3,7 @@ from pathlib import Path
 
 import pytest
 
-from autogpt.workspace import Workspace
+from autogpt.file_workspace import FileWorkspace
 
 _WORKSPACE_ROOT = Path("home/users/monty/auto_gpt_workspace")
 
@@ -40,7 +40,7 @@ _INACCESSIBLE_PATHS = (
             "test_folder/{null_byte}",
             "test_folder/{null_byte}test_file.txt",
         ],
-        Workspace.NULL_BYTES,
+        FileWorkspace.NULL_BYTES,
     )
 ]
 + [
@@ -68,7 +68,7 @@ def inaccessible_path(request):
 
 
 def test_sanitize_path_accessible(accessible_path, workspace_root):
-    full_path = Workspace._sanitize_path(
+    full_path = FileWorkspace._sanitize_path(
         accessible_path,
         root=workspace_root,
         restrict_to_root=True,
@@ -79,7 +79,7 @@ def test_sanitize_path_accessible(accessible_path, workspace_root):
 
 
 def test_sanitize_path_inaccessible(inaccessible_path, workspace_root):
     with pytest.raises(ValueError):
-        Workspace._sanitize_path(
+        FileWorkspace._sanitize_path(
             inaccessible_path,
             root=workspace_root,
             restrict_to_root=True,
@@ -87,13 +87,13 @@ def test_sanitize_path_inaccessible(inaccessible_path, workspace_root):
 
 
 def test_get_path_accessible(accessible_path, workspace_root):
-    workspace = Workspace(workspace_root, True)
+    workspace = FileWorkspace(workspace_root, True)
     full_path = workspace.get_path(accessible_path)
     assert full_path.is_absolute()
     assert full_path.is_relative_to(workspace_root)
 
 
 def test_get_path_inaccessible(inaccessible_path, workspace_root):
-    workspace = Workspace(workspace_root, True)
+    workspace = FileWorkspace(workspace_root, True)
     with pytest.raises(ValueError):
         workspace.get_path(inaccessible_path)
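The workspace tests encode the containment contract that carries over from Workspace to FileWorkspace: a candidate path must contain no NUL bytes and must resolve to a location inside the root, or a ValueError is raised. A sketch of that rule under those assumed semantics, not the actual `_sanitize_path` implementation:

```python
from pathlib import Path

def sanitize_path_sketch(relative_path: str, root: Path, restrict_to_root: bool = True) -> Path:
    # Assumed semantics, inferred from the tests: reject NUL bytes, resolve
    # against the root, and require the result to stay inside it.
    if "\0" in relative_path:
        raise ValueError(f"embedded null byte in {relative_path!r}")
    full_path = (root / relative_path).resolve()
    if restrict_to_root and not full_path.is_relative_to(root.resolve()):
        raise ValueError(f"{relative_path!r} escapes the workspace root")
    return full_path

root = Path("/srv/agent_ws")
assert sanitize_path_sketch("notes/a.txt", root) == root / "notes" / "a.txt"
try:
    sanitize_path_sketch("../outside.txt", root)
except ValueError:
    pass  # escapes the root, as the inaccessible-path tests expect
```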
diff --git a/docs/content/challenges/building_challenges.md b/docs/content/challenges/building_challenges.md
index adff7f04..d6fd7bf4 100644
--- a/docs/content/challenges/building_challenges.md
+++ b/docs/content/challenges/building_challenges.md
@@ -42,21 +42,21 @@ def kubernetes_agent(
     command_registry.import_commands("autogpt.app")
 
     # Define all the settings of our challenged agent
-    ai_config = AIConfig(
+    ai_profile = AIProfile(
         ai_name="Kubernetes",
         ai_role="an autonomous agent that specializes in creating Kubernetes deployment templates.",
         ai_goals=[
             "Write a simple kubernetes deployment file and save it as a kube.yaml.",
         ],
     )
-    ai_config.command_registry = command_registry
+    ai_profile.command_registry = command_registry
 
-    system_prompt = ai_config.construct_full_prompt()
+    system_prompt = ai_profile.construct_full_prompt()
     agent_test_config.set_continuous_mode(False)
     agent = Agent(
         memory=memory_json_file,
         command_registry=command_registry,
-        config=ai_config,
+        config=ai_profile,
         next_action_count=0,
         triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
     )