Organize all the llm stuff into a subpackage (#3436)

commit 3b74d2150e
parent ee4043ae19
Author: James Collins
Date: 2023-04-28 12:00:54 -07:00
Committed by: GitHub
31 changed files with 59 additions and 51 deletions
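At a glance: every LLM-related helper now lives under the autogpt.llm subpackage and is re-exported from its __init__, so call sites change only their import path. A hedged before/after sketch of the migration (all paths taken from the diffs below):

    # Before: helpers scattered across top-level modules
    # from autogpt.llm_utils import call_ai_function, create_chat_completion, get_ada_embedding
    # from autogpt.chat import chat_with_ai, create_chat_message
    # from autogpt.token_counter import count_message_tokens, count_string_tokens
    # from autogpt.api_manager import ApiManager

    # After: one consolidated subpackage
    from autogpt.llm import (
        ApiManager,
        call_ai_function,
        chat_with_ai,
        count_message_tokens,
        create_chat_completion,
        get_ada_embedding,
    )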


@@ -1,11 +1,10 @@
 from colorama import Fore, Style

 from autogpt.app import execute_command, get_command
-from autogpt.chat import chat_with_ai, create_chat_message
 from autogpt.config import Config
 from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
 from autogpt.json_utils.utilities import validate_json
-from autogpt.llm_utils import create_chat_completion
+from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_message
 from autogpt.logs import logger, print_assistant_thoughts
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner


@@ -4,7 +4,7 @@ from __future__ import annotations
 from typing import List

 from autogpt.config.config import Config
-from autogpt.llm_utils import create_chat_completion
+from autogpt.llm import create_chat_completion
 from autogpt.singleton import Singleton
 from autogpt.types.openai import Message


@@ -2,7 +2,7 @@
 from __future__ import annotations

 from autogpt.commands.command import command
-from autogpt.llm_utils import call_ai_function
+from autogpt.llm import call_ai_function


 @command(


@@ -3,7 +3,7 @@ from __future__ import annotations
 import json

 from autogpt.commands.command import command
-from autogpt.llm_utils import call_ai_function
+from autogpt.llm import call_ai_function


 @command(


@@ -4,7 +4,7 @@ from __future__ import annotations
 import json

 from autogpt.commands.command import command
-from autogpt.llm_utils import call_ai_function
+from autogpt.llm import call_ai_function


 @command(


@@ -11,7 +11,7 @@ from regex import regex
 from autogpt.config import Config
 from autogpt.json_utils.json_fix_general import correct_json
-from autogpt.llm_utils import call_ai_function
+from autogpt.llm import call_ai_function
 from autogpt.logs import logger
 from autogpt.speech import say_text

autogpt/llm/__init__.py (new file, 22 lines)

@@ -0,0 +1,22 @@
+from autogpt.llm.api_manager import ApiManager
+from autogpt.llm.chat import chat_with_ai, create_chat_message, generate_context
+from autogpt.llm.llm_utils import (
+    call_ai_function,
+    create_chat_completion,
+    get_ada_embedding,
+)
+from autogpt.llm.modelsinfo import COSTS
+from autogpt.llm.token_counter import count_message_tokens, count_string_tokens
+
+__all__ = [
+    "ApiManager",
+    "create_chat_message",
+    "generate_context",
+    "chat_with_ai",
+    "call_ai_function",
+    "create_chat_completion",
+    "get_ada_embedding",
+    "COSTS",
+    "count_message_tokens",
+    "count_string_tokens",
+]
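Since the package root re-exports the whole public surface, downstream code can import from autogpt.llm directly. A minimal usage sketch (the message content and model name are illustrative, not part of the diff):

    from autogpt.llm import count_message_tokens, create_chat_message

    # create_chat_message returns a {"role": ..., "content": ...} dict
    messages = [create_chat_message("user", "Summarize this repository.")]
    print(count_message_tokens(messages, model="gpt-3.5-turbo"))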


@@ -3,8 +3,8 @@ from __future__ import annotations
 import openai

 from autogpt.config import Config
+from autogpt.llm.modelsinfo import COSTS
 from autogpt.logs import logger
-from autogpt.modelsinfo import COSTS
 from autogpt.singleton import Singleton


@@ -3,10 +3,10 @@ from random import shuffle
 from openai.error import RateLimitError

-from autogpt import token_counter
-from autogpt.api_manager import ApiManager
 from autogpt.config import Config
-from autogpt.llm_utils import create_chat_completion
+from autogpt.llm.api_manager import ApiManager
+from autogpt.llm.llm_utils import create_chat_completion
+from autogpt.llm.token_counter import count_message_tokens
 from autogpt.logs import logger
 from autogpt.types.openai import Message
@@ -43,7 +43,7 @@ def generate_context(prompt, relevant_memory, full_message_history, model):
     next_message_to_add_index = len(full_message_history) - 1
     insertion_index = len(current_context)
     # Count the currently used tokens
-    current_tokens_used = token_counter.count_message_tokens(current_context, model)
+    current_tokens_used = count_message_tokens(current_context, model)
     return (
         next_message_to_add_index,
         current_tokens_used,
@@ -114,7 +114,7 @@ def chat_with_ai(
             prompt, relevant_memory, full_message_history, model
         )

-        current_tokens_used += token_counter.count_message_tokens(
+        current_tokens_used += count_message_tokens(
             [create_chat_message("user", user_input)], model
         )  # Account for user input (appended later)
@@ -122,9 +122,7 @@ def chat_with_ai(
             # print (f"CURRENT TOKENS USED: {current_tokens_used}")
             message_to_add = full_message_history[next_message_to_add_index]

-            tokens_to_add = token_counter.count_message_tokens(
-                [message_to_add], model
-            )
+            tokens_to_add = count_message_tokens([message_to_add], model)
             if current_tokens_used + tokens_to_add > send_token_limit:
                 break
@@ -175,7 +173,7 @@ def chat_with_ai(
             )
             if not plugin_response or plugin_response == "":
                 continue
-            tokens_to_add = token_counter.count_message_tokens(
+            tokens_to_add = count_message_tokens(
                 [create_chat_message("system", plugin_response)], model
             )
             if current_tokens_used + tokens_to_add > send_token_limit:


@@ -8,8 +8,8 @@ import openai
 from colorama import Fore, Style
 from openai.error import APIError, RateLimitError, Timeout

-from autogpt.api_manager import ApiManager
 from autogpt.config import Config
+from autogpt.llm.api_manager import ApiManager
 from autogpt.logs import logger
 from autogpt.types.openai import Message


@@ -1,11 +1,9 @@
 """Logging module for Auto-GPT."""
 import json
 import logging
 import os
 import random
 import re
 import time
 import traceback
 from logging import LogRecord

 from colorama import Fore, Style


@@ -7,7 +7,7 @@ from typing import Any, List
 import numpy as np
 import orjson

-from autogpt.llm_utils import get_ada_embedding
+from autogpt.llm import get_ada_embedding
 from autogpt.memory.base import MemoryProviderSingleton

 EMBED_DIM = 1536


@@ -4,7 +4,7 @@ import re
 from pymilvus import Collection, CollectionSchema, DataType, FieldSchema, connections

 from autogpt.config import Config
-from autogpt.llm_utils import get_ada_embedding
+from autogpt.llm import get_ada_embedding
 from autogpt.memory.base import MemoryProviderSingleton


@@ -1,7 +1,7 @@
 import pinecone
 from colorama import Fore, Style

-from autogpt.llm_utils import get_ada_embedding
+from autogpt.llm import get_ada_embedding
 from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton


@@ -10,7 +10,7 @@ from redis.commands.search.field import TextField, VectorField
 from redis.commands.search.indexDefinition import IndexDefinition, IndexType
 from redis.commands.search.query import Query

-from autogpt.llm_utils import get_ada_embedding
+from autogpt.llm import get_ada_embedding
 from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton


@@ -3,7 +3,7 @@ from weaviate import Client
 from weaviate.embedded import EmbeddedOptions
 from weaviate.util import generate_uuid5

-from autogpt.llm_utils import get_ada_embedding
+from autogpt.llm import get_ada_embedding
 from autogpt.memory.base import MemoryProviderSingleton


@@ -4,9 +4,8 @@ from typing import Dict, Generator, Optional
 import spacy
 from selenium.webdriver.remote.webdriver import WebDriver

-from autogpt import token_counter
 from autogpt.config import Config
-from autogpt.llm_utils import create_chat_completion
+from autogpt.llm import count_message_tokens, create_chat_completion
 from autogpt.memory import get_memory

 CFG = Config()
@@ -44,7 +43,7 @@ def split_text(
         ]
         expected_token_usage = (
-            token_usage_of_chunk(messages=message_with_additional_sentence, model=model)
+            count_message_tokens(messages=message_with_additional_sentence, model=model)
             + 1
         )
         if expected_token_usage <= max_length:
@@ -56,7 +55,7 @@ def split_text(
                 create_message(" ".join(current_chunk), question)
             ]
             expected_token_usage = (
-                token_usage_of_chunk(messages=message_this_sentence_only, model=model)
+                count_message_tokens(messages=message_this_sentence_only, model=model)
                 + 1
             )
             if expected_token_usage > max_length:
@@ -68,10 +67,6 @@ def split_text(
         yield " ".join(current_chunk)

-
-def token_usage_of_chunk(messages, model):
-    return token_counter.count_message_tokens(messages, model)
-

 def summarize_text(
     url: str, text: str, question: str, driver: Optional[WebDriver] = None
 ) -> str:
@@ -112,7 +107,7 @@ def summarize_text(
         memory.add(memory_to_add)

         messages = [create_message(chunk, question)]
-        tokens_for_chunk = token_counter.count_message_tokens(messages, model)
+        tokens_for_chunk = count_message_tokens(messages, model)
         print(
             f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
         )
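The deleted token_usage_of_chunk helper was a one-line pass-through to the token counter, so split_text now calls count_message_tokens directly; the + 1 safety margin is unchanged. A hedged sketch of the resulting check (variable names from the diff, the append branch is illustrative):

    from autogpt.llm import count_message_tokens

    # messages are chat dicts; + 1 leaves headroom for the sentence under consideration
    expected_token_usage = (
        count_message_tokens(messages=message_with_additional_sentence, model=model) + 1
    )
    if expected_token_usage <= max_length:
        current_chunk.append(sentence)  # illustrative: keep growing the current chunk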


@@ -1,8 +1,8 @@
 from colorama import Fore

-from autogpt.api_manager import ApiManager
 from autogpt.config.ai_config import AIConfig
 from autogpt.config.config import Config
+from autogpt.llm import ApiManager
 from autogpt.logs import logger
 from autogpt.prompts.generator import PromptGenerator
 from autogpt.setup import prompt_user


@@ -6,7 +6,7 @@ from colorama import Fore, Style
 from autogpt import utils
 from autogpt.config import Config
 from autogpt.config.ai_config import AIConfig
-from autogpt.llm_utils import create_chat_completion
+from autogpt.llm import create_chat_completion
 from autogpt.logs import logger

 CFG = Config()


@@ -3,8 +3,8 @@ from pathlib import Path
 import pytest
 from pytest_mock import MockerFixture

-from autogpt.api_manager import ApiManager
 from autogpt.config import Config
+from autogpt.llm import ApiManager
 from autogpt.workspace import Workspace

 pytest_plugins = ["tests.integration.agent_factory"]


@@ -3,7 +3,7 @@ import string
 import pytest
 from numpy.random import RandomState

-from autogpt.llm_utils import get_ada_embedding
+from autogpt.llm.llm_utils import get_ada_embedding
 from tests.utils import requires_api_key


@@ -5,7 +5,7 @@ from weaviate import Client
 from weaviate.util import get_valid_uuid

 from autogpt.config import Config
-from autogpt.llm_utils import get_ada_embedding
+from autogpt.llm import get_ada_embedding
 from autogpt.memory.weaviate import WeaviateMemory


@@ -3,10 +3,7 @@ from unittest.mock import MagicMock
 import pytest

 from autogpt.agent import Agent
-from autogpt.chat import chat_with_ai
 from autogpt.config import Config
-from autogpt.speech import say_text
-from autogpt.utils import clean_input


 @pytest.fixture


@@ -1,7 +1,7 @@
 import pytest

 from autogpt.agent.agent_manager import AgentManager
-from autogpt.llm_utils import create_chat_completion
+from autogpt.llm import create_chat_completion


 @pytest.fixture


@@ -2,7 +2,7 @@ from unittest.mock import MagicMock, patch
 import pytest

-from autogpt.api_manager import COSTS, ApiManager
+from autogpt.llm import COSTS, ApiManager

 api_manager = ApiManager()


@@ -1,8 +1,8 @@
 import pytest
 from openai.error import APIError, RateLimitError

-from autogpt.llm_utils import get_ada_embedding, retry_openai_api
-from autogpt.modelsinfo import COSTS
+from autogpt.llm import COSTS, get_ada_embedding
+from autogpt.llm.llm_utils import retry_openai_api


 @pytest.fixture(params=[RateLimitError, APIError])
@pytest.fixture(params=[RateLimitError, APIError])
@@ -19,7 +19,7 @@ def mock_create_embedding(mocker):
     mock_response.usage.prompt_tokens = 5
     mock_response.__getitem__.side_effect = lambda key: [{"embedding": [0.1, 0.2, 0.3]}]
     return mocker.patch(
-        "autogpt.llm_utils.create_embedding", return_value=mock_response
+        "autogpt.llm.llm_utils.create_embedding", return_value=mock_response
     )
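Unlike ordinary imports, mocker.patch targets are plain strings resolved at patch time, so refactoring tools will not rewrite them; each hard-coded path has to track the module move by hand. A sketch of the updated fixture pattern (abbreviated from the diff above; the MagicMock setup mirrors that test):

    import pytest

    @pytest.fixture
    def mock_create_embedding(mocker):
        mock_response = mocker.MagicMock()
        mock_response.usage.prompt_tokens = 5
        # Patch the name at its new home, autogpt.llm.llm_utils,
        # not the deleted top-level autogpt.llm_utils module.
        return mocker.patch(
            "autogpt.llm.llm_utils.create_embedding", return_value=mock_response
        )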


@@ -1,7 +1,6 @@
 import pytest

 import tests.context
-from autogpt.token_counter import count_message_tokens, count_string_tokens
+from autogpt.llm import count_message_tokens, count_string_tokens


 def test_count_message_tokens():


@@ -2,7 +2,7 @@
 import time
 from unittest.mock import patch

-from autogpt.chat import create_chat_message, generate_context
+from autogpt.llm import create_chat_message, generate_context


 def test_happy_path_role_content():


@@ -1,6 +1,6 @@
 from autogpt.agent.agent import Agent
 from autogpt.config import AIConfig
-from autogpt.llm_utils import create_chat_completion
+from autogpt.llm import create_chat_completion


 def test_get_self_feedback(mocker):