replace 50+ occurrences of print() with logger (#3056)

Co-authored-by: James Collins <collijk@uw.edu>
Co-authored-by: Luke Kyohere <lkyohere@mfsafrica.com>
Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com>
Co-authored-by: Media <12145726+rihp@users.noreply.github.com>
Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Author: Richard Beales
Date: 2023-04-30 05:40:57 +01:00 (committed by GitHub)
Parent: 6997bb0bdd
Commit: 06ae4684c8
20 changed files with 134 additions and 139 deletions
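The gist of the change: console output now goes through the shared `Logger` singleton in `autogpt/logs.py` rather than straight to stdout, so every message picks up a log level, consistent formatting, and whatever handlers are configured. A minimal before/after sketch of the pattern applied throughout the hunks below:

```python
# Before: raw console write, bypassing log levels and handlers
print(f"Executing file '{filename}'")

# After: routed through the shared singleton logger
from autogpt.logs import logger

logger.info(f"Executing file '{filename}'")
```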

View File

@@ -8,7 +8,7 @@ from autogpt.llm import chat_with_ai, create_chat_completion, create_chat_messag
 from autogpt.logs import logger, print_assistant_thoughts
 from autogpt.speech import say_text
 from autogpt.spinner import Spinner
-from autogpt.utils import clean_input, send_chat_message_to_user
+from autogpt.utils import clean_input
 from autogpt.workspace import Workspace
@@ -83,11 +83,7 @@ class Agent:
                 logger.typewriter_log(
                     "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                 )
-                send_chat_message_to_user(
-                    f"Continuous Limit Reached: \n {cfg.continuous_limit}"
-                )
                 break
-            send_chat_message_to_user("Thinking... \n")
             # Send message to AI, get response
             with Spinner("Thinking... "):
                 assistant_reply = chat_with_ai(
@@ -117,7 +113,6 @@ class Agent:
                 if cfg.speak_mode:
                     say_text(f"I want to execute {command_name}")
-                send_chat_message_to_user("Thinking... \n")
                 arguments = self._resolve_pathlike_command_args(arguments)
             except Exception as e:
@@ -128,24 +123,19 @@ class Agent:
                 # Get key press: Prompt the user to press enter to continue or escape
                 # to exit
                 self.user_input = ""
-                send_chat_message_to_user(
-                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
-                    f"ARGUMENTS = {arguments}"
-                )
                 logger.typewriter_log(
                     "NEXT ACTION: ",
                     Fore.CYAN,
                     f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                     f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                 )
-                print(
+                logger.info(
                     "Enter 'y' to authorise command, 'y -N' to run N continuous commands, 's' to run self-feedback commands"
                     "'n' to exit program, or enter feedback for "
-                    f"{self.ai_name}...",
-                    flush=True,
+                    f"{self.ai_name}..."
                 )
                 while True:
                     console_input = ""
                     if cfg.chat_messages_enabled:
                         console_input = clean_input("Waiting for your response...")
                     else:
@@ -176,7 +166,7 @@ class Agent:
                         user_input = self_feedback_resp
                         break
                     elif console_input.lower().strip() == "":
-                        print("Invalid input format.")
+                        logger.warn("Invalid input format.")
                         continue
                     elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
                         try:
@@ -185,8 +175,8 @@ class Agent:
                             )
                             user_input = "GENERATE NEXT COMMAND JSON"
                         except ValueError:
-                            print(
-                                f"Invalid input format. Please enter '{cfg.authorise_key} -N' where N is"
+                            logger.warn(
+                                "Invalid input format. Please enter 'y -n' where n is"
                                 " the number of continuous tasks."
                             )
                             continue
@@ -206,16 +196,10 @@ class Agent:
                         "",
                     )
                 elif user_input == "EXIT":
-                    send_chat_message_to_user("Exiting...")
-                    print("Exiting...", flush=True)
+                    logger.info("Exiting...")
                     break
             else:
                 # Print command
-                send_chat_message_to_user(
-                    "NEXT ACTION: \n " + f"COMMAND = {command_name} \n "
-                    f"ARGUMENTS = {arguments}"
-                )
                 logger.typewriter_log(
                     "NEXT ACTION: ",
                     Fore.CYAN,

View File

@@ -6,6 +6,7 @@ from autogpt.agent.agent_manager import AgentManager
 from autogpt.commands.command import CommandRegistry, command
 from autogpt.commands.web_requests import scrape_links, scrape_text
 from autogpt.config import Config
+from autogpt.logs import logger
 from autogpt.memory import get_memory
 from autogpt.processing.text import summarize_text
 from autogpt.prompts.generator import PromptGenerator
@@ -172,7 +173,7 @@ def get_hyperlinks(url: str) -> Union[str, List[str]]:
 def shutdown() -> NoReturn:
     """Shut down the program"""
-    print("Shutting down...")
+    logger.info("Shutting down...")
     quit()

View File

@@ -8,6 +8,7 @@ from docker.errors import ImageNotFound
 from autogpt.commands.command import command
 from autogpt.config import Config
+from autogpt.logs import logger
 CFG = Config()
@@ -22,7 +23,7 @@ def execute_python_file(filename: str) -> str:
     Returns:
         str: The output of the file
     """
-    print(f"Executing file '{filename}'")
+    logger.info(f"Executing file '{filename}'")
     if not filename.endswith(".py"):
         return "Error: Invalid file type. Only .py files are allowed."
@@ -47,9 +48,11 @@ def execute_python_file(filename: str) -> str:
         image_name = "python:3-alpine"
         try:
             client.images.get(image_name)
-            print(f"Image '{image_name}' found locally")
+            logger.warn(f"Image '{image_name}' found locally")
         except ImageNotFound:
-            print(f"Image '{image_name}' not found locally, pulling from Docker Hub")
+            logger.info(
+                f"Image '{image_name}' not found locally, pulling from Docker Hub"
+            )
             # Use the low-level API to stream the pull response
             low_level_client = docker.APIClient()
             for line in low_level_client.pull(image_name, stream=True, decode=True):
@@ -57,9 +60,9 @@ def execute_python_file(filename: str) -> str:
                 status = line.get("status")
                 progress = line.get("progress")
                 if status and progress:
-                    print(f"{status}: {progress}")
+                    logger.info(f"{status}: {progress}")
                 elif status:
-                    print(status)
+                    logger.info(status)
         container = client.containers.run(
             image_name,
             f"python {Path(filename).relative_to(CFG.workspace_path)}",
@@ -85,7 +88,7 @@ def execute_python_file(filename: str) -> str:
         return logs
     except docker.errors.DockerException as e:
-        print(
+        logger.warn(
             "Could not run the script in a container. If you haven't already, please install Docker https://docs.docker.com/get-docker/"
         )
         return f"Error: {str(e)}"
@@ -118,7 +121,9 @@ def execute_shell(command_line: str) -> str:
     if not current_dir.is_relative_to(CFG.workspace_path):
         os.chdir(CFG.workspace_path)
-    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
+    logger.info(
+        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
+    )
     result = subprocess.run(command_line, capture_output=True, shell=True)
     output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"
@@ -154,7 +159,9 @@ def execute_shell_popen(command_line) -> str:
     if CFG.workspace_path not in current_dir:
         os.chdir(CFG.workspace_path)
-    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
+    logger.info(
+        f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
+    )
     do_not_show_output = subprocess.DEVNULL
     process = subprocess.Popen(

View File

@@ -11,6 +11,7 @@ from requests.adapters import HTTPAdapter, Retry
 from autogpt.commands.command import command
 from autogpt.config import Config
+from autogpt.logs import logger
 from autogpt.spinner import Spinner
 from autogpt.utils import readable_file_size
@@ -106,25 +107,25 @@ def ingest_file(
     :param overlap: The number of overlapping characters between chunks, default is 200
     """
     try:
-        print(f"Working with file {filename}")
+        logger.info(f"Working with file {filename}")
         content = read_file(filename)
         content_length = len(content)
-        print(f"File length: {content_length} characters")
+        logger.info(f"File length: {content_length} characters")
         chunks = list(split_file(content, max_length=max_length, overlap=overlap))
         num_chunks = len(chunks)
         for i, chunk in enumerate(chunks):
-            print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
+            logger.info(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
             memory_to_add = (
                 f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
             )
             memory.add(memory_to_add)
-        print(f"Done ingesting {num_chunks} chunks from {filename}.")
+        logger.info(f"Done ingesting {num_chunks} chunks from {filename}.")
     except Exception as e:
-        print(f"Error while ingesting file '{filename}': {str(e)}")
+        logger.info(f"Error while ingesting file '{filename}': {str(e)}")
 @command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')

View File

@@ -9,6 +9,7 @@ from PIL import Image
 from autogpt.commands.command import command
 from autogpt.config import Config
+from autogpt.logs import logger
 CFG = Config()
@@ -69,7 +70,7 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
     )
     image = Image.open(io.BytesIO(response.content))
-    print(f"Image Generated for prompt:{prompt}")
+    logger.info(f"Image Generated for prompt:{prompt}")
     image.save(filename)
@@ -91,7 +92,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
     # Check for supported image sizes
     if size not in [256, 512, 1024]:
         closest = min([256, 512, 1024], key=lambda x: abs(x - size))
-        print(
+        logger.info(
             f"DALL-E only supports image sizes of 256x256, 512x512, or 1024x1024. Setting to {closest}, was {size}."
         )
         size = closest
@@ -104,7 +105,7 @@ def generate_image_with_dalle(prompt: str, filename: str, size: int) -> str:
         api_key=CFG.openai_api_key,
     )
-    print(f"Image Generated for prompt:{prompt}")
+    logger.info(f"Image Generated for prompt:{prompt}")
     image_data = b64decode(response["data"][0]["b64_json"])
@@ -153,7 +154,7 @@ def generate_image_with_sd_webui(
         },
     )
-    print(f"Image Generated for prompt:{prompt}")
+    logger.info(f"Image Generated for prompt:{prompt}")
     # Save the image to disk
     response = response.json()

View File

@@ -1,10 +1,12 @@
 """Web scraping commands using Playwright"""
 from __future__ import annotations
+from autogpt.logs import logger
 try:
     from playwright.sync_api import sync_playwright
 except ImportError:
-    print(
+    logger.info(
         "Playwright not installed. Please install it with 'pip install playwright' to use."
     )
 from bs4 import BeautifulSoup

View File

@@ -9,6 +9,7 @@ from typing import Optional
 from autogpt.config import Config
 from autogpt.json_utils.utilities import extract_char_position
+from autogpt.logs import logger
 CFG = Config()
@@ -33,8 +34,7 @@ def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
         json.loads(json_to_load)
         return json_to_load
     except json.JSONDecodeError as e:
-        if CFG.debug_mode:
-            print("json loads error - fix invalid escape", e)
+        logger.debug("json loads error - fix invalid escape", e)
         error_message = str(e)
     return json_to_load
@@ -98,13 +98,11 @@ def correct_json(json_to_load: str) -> str:
     """
     try:
-        if CFG.debug_mode:
-            print("json", json_to_load)
+        logger.debug("json", json_to_load)
        json.loads(json_to_load)
        return json_to_load
    except json.JSONDecodeError as e:
-        if CFG.debug_mode:
-            print("json loads error", e)
+        logger.debug("json loads error", e)
        error_message = str(e)
        if error_message.startswith("Invalid \\escape"):
            json_to_load = fix_invalid_escape(json_to_load, error_message)
@@ -116,8 +114,7 @@ def correct_json(json_to_load: str) -> str:
            json.loads(json_to_load)
            return json_to_load
        except json.JSONDecodeError as e:
-            if CFG.debug_mode:
-                print("json loads error - add quotes", e)
+            logger.debug("json loads error - add quotes", e)
            error_message = str(e)
        if balanced_str := balance_braces(json_to_load):
            return balanced_str

View File

@@ -49,9 +49,8 @@ def validate_json(json_object: object, schema_name: str) -> dict | None:
         for error in errors:
             logger.error(f"Error: {error.message}")
         return None
-    if CFG.debug_mode:
-        print("The JSON object is valid.")
-    else:
+    logger.debug("The JSON object is valid.")
     return json_object

View File

@@ -185,9 +185,8 @@ def chat_with_ai(
                     [create_chat_message("system", plugin_response)], model
                 )
                 if current_tokens_used + tokens_to_add > send_token_limit:
-                    if cfg.debug_mode:
-                        print("Plugin response too long, skipping:", plugin_response)
-                        print("Plugins remaining at stop:", plugin_count - i)
+                    logger.debug("Plugin response too long, skipping:", plugin_response)
+                    logger.debug("Plugins remaining at stop:", plugin_count - i)
                     break
                 current_context.append(create_chat_message("system", plugin_response))
@@ -227,5 +226,5 @@ def chat_with_ai(
             return assistant_reply
         except RateLimitError:
             # TODO: When we switch to langchain, this is built in
-            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
+            logger.warn("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)

View File

@@ -128,10 +128,9 @@ def create_chat_completion(
     num_retries = 10
     warned_user = False
-    if cfg.debug_mode:
-        print(
-            f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
-        )
+    logger.debug(
+        f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
+    )
     for plugin in cfg.plugins:
         if plugin.can_handle_chat_completion(
             messages=messages,
@@ -169,10 +168,9 @@ def create_chat_completion(
                 )
                 break
             except RateLimitError:
-                if cfg.debug_mode:
-                    print(
-                        f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
-                    )
+                logger.debug(
+                    f"{Fore.RED}Error: ", f"Reached rate limit, passing...{Fore.RESET}"
+                )
                 if not warned_user:
                     logger.double_check(
                         f"Please double check that you have setup a {Fore.CYAN + Style.BRIGHT}PAID{Style.RESET_ALL} OpenAI API Account. "
@@ -184,11 +182,10 @@ def create_chat_completion(
                 raise
             if attempt == num_retries - 1:
                 raise
-            if cfg.debug_mode:
-                print(
-                    f"{Fore.RED}Error: ",
-                    f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
-                )
+            logger.debug(
+                f"{Fore.RED}Error: ",
+                f"API Bad gateway. Waiting {backoff} seconds...{Fore.RESET}",
+            )
             time.sleep(backoff)
     if response is None:
         logger.typewriter_log(

View File

@@ -10,7 +10,6 @@ from colorama import Fore, Style
 from autogpt.singleton import Singleton
 from autogpt.speech import say_text
-from autogpt.utils import send_chat_message_to_user
 class Logger(metaclass=Singleton):
@@ -83,8 +82,6 @@ class Logger(metaclass=Singleton):
         if speak_text and self.speak_mode:
             say_text(f"{title}. {content}")
-        send_chat_message_to_user(f"{title}. {content}")
         if content:
             if isinstance(content, list):
                 content = " ".join(content)
@@ -103,6 +100,14 @@ class Logger(metaclass=Singleton):
     ):
         self._log(title, title_color, message, logging.DEBUG)
+    def info(
+        self,
+        message,
+        title="",
+        title_color="",
+    ):
+        self._log(title, title_color, message, logging.INFO)
     def warn(
         self,
         message,
@@ -114,11 +119,19 @@ class Logger(metaclass=Singleton):
     def error(self, title, message=""):
         self._log(title, Fore.RED, message, logging.ERROR)
-    def _log(self, title="", title_color="", message="", level=logging.INFO):
+    def _log(
+        self,
+        title: str = "",
+        title_color: str = "",
+        message: str = "",
+        level=logging.INFO,
+    ):
         if message:
             if isinstance(message, list):
                 message = " ".join(message)
-        self.logger.log(level, message, extra={"title": title, "color": title_color})
+        self.logger.log(
+            level, message, extra={"title": str(title), "color": str(title_color)}
+        )
     def set_level(self, level):
         self.logger.setLevel(level)
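The new `info()` mirrors the existing `debug()` and `warn()`: the message comes first, with optional `title` and `title_color` after it, and `_log()` now coerces both extras to `str` before handing off to the stdlib logger. One consequence visible in several hunks: call sites converted from two-argument `print()` calls (for example `logger.warn("Error creating Redis search index: ", e)`) pass the second value as the positional `title`, not as part of the message. A short usage sketch against the signatures defined above:

```python
from autogpt.logs import logger

logger.info("Manifest for https://example.com already exists")  # message only
logger.warn("Could not run the script in a container.")         # WARNING level
logger.debug("json loads error - add quotes", "JSON")           # second argument is the title
```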

View File

@@ -1,3 +1,4 @@
+from autogpt.logs import logger
 from autogpt.memory.local import LocalCache
 from autogpt.memory.no_memory import NoMemory
@@ -10,7 +11,6 @@ try:
     supported_memory.append("redis")
 except ImportError:
-    # print("Redis not installed. Skipping import.")
     RedisMemory = None
 try:
@@ -18,7 +18,6 @@ try:
     supported_memory.append("pinecone")
 except ImportError:
-    # print("Pinecone not installed. Skipping import.")
     PineconeMemory = None
 try:
@@ -26,7 +25,6 @@ try:
     supported_memory.append("weaviate")
 except ImportError:
-    # print("Weaviate not installed. Skipping import.")
     WeaviateMemory = None
 try:
@@ -34,7 +32,6 @@ try:
     supported_memory.append("milvus")
 except ImportError:
-    # print("pymilvus not installed. Skipping import.")
     MilvusMemory = None
@@ -42,7 +39,7 @@ def get_memory(cfg, init=False):
     memory = None
     if cfg.memory_backend == "pinecone":
         if not PineconeMemory:
-            print(
+            logger.warn(
                 "Error: Pinecone is not installed. Please install pinecone"
                 " to use Pinecone as a memory backend."
             )
@@ -52,7 +49,7 @@ def get_memory(cfg, init=False):
             memory.clear()
     elif cfg.memory_backend == "redis":
         if not RedisMemory:
-            print(
+            logger.warn(
                 "Error: Redis is not installed. Please install redis-py to"
                 " use Redis as a memory backend."
             )
@@ -60,7 +57,7 @@ def get_memory(cfg, init=False):
             memory = RedisMemory(cfg)
     elif cfg.memory_backend == "weaviate":
         if not WeaviateMemory:
-            print(
+            logger.warn(
                 "Error: Weaviate is not installed. Please install weaviate-client to"
                 " use Weaviate as a memory backend."
             )
@@ -68,7 +65,7 @@ def get_memory(cfg, init=False):
             memory = WeaviateMemory(cfg)
     elif cfg.memory_backend == "milvus":
         if not MilvusMemory:
-            print(
+            logger.warn(
                 "Error: pymilvus sdk is not installed."
                 "Please install pymilvus to use Milvus or Zilliz Cloud as memory backend."
             )

View File

@@ -73,7 +73,7 @@ class RedisMemory(MemoryProviderSingleton):
                 ),
             )
         except Exception as e:
-            print("Error creating Redis search index: ", e)
+            logger.warn("Error creating Redis search index: ", e)
         existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
         self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0
@@ -145,7 +145,7 @@ class RedisMemory(MemoryProviderSingleton):
                 query, query_params={"vector": query_vector}
             )
         except Exception as e:
-            print("Error calling Redis search: ", e)
+            logger.warn("Error calling Redis search: ", e)
             return None
         return [result.data for result in results.docs]

View File

@@ -4,6 +4,7 @@ from weaviate.embedded import EmbeddedOptions
 from weaviate.util import generate_uuid5
 from autogpt.llm import get_ada_embedding
+from autogpt.logs import logger
 from autogpt.memory.base import MemoryProviderSingleton
@@ -35,7 +36,7 @@ class WeaviateMemory(MemoryProviderSingleton):
                 )
             )
-            print(
+            logger.info(
                 f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}"
             )
         else:
@@ -116,7 +117,7 @@ class WeaviateMemory(MemoryProviderSingleton):
             return []
         except Exception as err:
-            print(f"Unexpected error {err=}, {type(err)=}")
+            logger.warn(f"Unexpected error {err=}, {type(err)=}")
             return []
     def get_stats(self):

View File

@@ -15,6 +15,7 @@ from auto_gpt_plugin_template import AutoGPTPluginTemplate
 from openapi_python_client.cli import Config as OpenAPIConfig
 from autogpt.config import Config
+from autogpt.logs import logger
 from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
@@ -33,11 +34,10 @@ def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
     with zipfile.ZipFile(zip_path, "r") as zfile:
         for name in zfile.namelist():
             if name.endswith("__init__.py"):
-                if debug:
-                    print(f"Found module '{name}' in the zipfile at: {name}")
+                logger.debug(f"Found module '{name}' in the zipfile at: {name}")
                 result.append(name)
-    if debug and len(result) == 0:
-        print(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
+    if len(result) == 0:
+        logger.debug(f"Module '__init__.py' not found in the zipfile @ {zip_path}.")
     return result
@@ -71,12 +71,12 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
             if response.status_code == 200:
                 manifest = response.json()
                 if manifest["schema_version"] != "v1":
-                    print(
+                    logger.warn(
                         f"Unsupported manifest version: {manifest['schem_version']} for {url}"
                     )
                     continue
                 if manifest["api"]["type"] != "openapi":
-                    print(
+                    logger.warn(
                         f"Unsupported API type: {manifest['api']['type']} for {url}"
                     )
                     continue
@@ -84,11 +84,13 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
                     manifest, f"{openai_plugin_client_dir}/ai-plugin.json"
                 )
             else:
-                print(f"Failed to fetch manifest for {url}: {response.status_code}")
+                logger.warn(
+                    f"Failed to fetch manifest for {url}: {response.status_code}"
+                )
         except requests.exceptions.RequestException as e:
-            print(f"Error while requesting manifest from {url}: {e}")
+            logger.warn(f"Error while requesting manifest from {url}: {e}")
     else:
-        print(f"Manifest for {url} already exists")
+        logger.info(f"Manifest for {url} already exists")
         manifest = json.load(open(f"{openai_plugin_client_dir}/ai-plugin.json"))
     if not os.path.exists(f"{openai_plugin_client_dir}/openapi.json"):
         openapi_spec = openapi_python_client._get_document(
@@ -98,7 +100,7 @@ def fetch_openai_plugins_manifest_and_spec(cfg: Config) -> dict:
             openapi_spec, f"{openai_plugin_client_dir}/openapi.json"
         )
     else:
-        print(f"OpenAPI spec for {url} already exists")
+        logger.info(f"OpenAPI spec for {url} already exists")
         openapi_spec = json.load(open(f"{openai_plugin_client_dir}/openapi.json"))
     manifests[url] = {"manifest": manifest, "openapi_spec": openapi_spec}
     return manifests
@@ -115,13 +117,13 @@ def create_directory_if_not_exists(directory_path: str) -> bool:
     if not os.path.exists(directory_path):
         try:
             os.makedirs(directory_path)
-            print(f"Created directory: {directory_path}")
+            logger.debug(f"Created directory: {directory_path}")
             return True
         except OSError as e:
-            print(f"Error creating directory {directory_path}: {e}")
+            logger.warn(f"Error creating directory {directory_path}: {e}")
             return False
     else:
-        print(f"Directory {directory_path} already exists")
+        logger.info(f"Directory {directory_path} already exists")
         return True
@@ -159,7 +161,7 @@ def initialize_openai_plugins(
             config=_config,
         )
         if client_results:
-            print(
+            logger.warn(
                 f"Error creating OpenAPI client: {client_results[0].header} \n"
                 f" details: {client_results[0].detail}"
            )
@@ -212,8 +214,7 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
         for module in moduleList:
             plugin = Path(plugin)
             module = Path(module)
-            if debug:
-                print(f"Plugin: {plugin} Module: {module}")
+            logger.debug(f"Plugin: {plugin} Module: {module}")
             zipped_package = zipimporter(str(plugin))
             zipped_module = zipped_package.load_module(str(module.parent))
             for key in dir(zipped_module):
@@ -240,9 +241,9 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
             loaded_plugins.append(plugin)
     if loaded_plugins:
-        print(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
+        logger.info(f"\nPlugins found: {len(loaded_plugins)}\n" "--------------------")
     for plugin in loaded_plugins:
-        print(f"{plugin._name}: {plugin._version} - {plugin._description}")
+        logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
     return loaded_plugins

View File

@@ -6,6 +6,7 @@ from selenium.webdriver.remote.webdriver import WebDriver
 from autogpt.config import Config
 from autogpt.llm import count_message_tokens, create_chat_completion
+from autogpt.logs import logger
 from autogpt.memory import get_memory
 CFG = Config()
@@ -86,7 +87,7 @@ def summarize_text(
         model = CFG.fast_llm_model
     text_length = len(text)
-    print(f"Text length: {text_length} characters")
+    logger.info(f"Text length: {text_length} characters")
     summaries = []
     chunks = list(
@@ -99,7 +100,7 @@ def summarize_text(
     for i, chunk in enumerate(chunks):
         if driver:
             scroll_to_percentage(driver, scroll_ratio * i)
-        print(f"Adding chunk {i + 1} / {len(chunks)} to memory")
+        logger.info(f"Adding chunk {i + 1} / {len(chunks)} to memory")
         memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"
@@ -108,7 +109,7 @@ def summarize_text(
         messages = [create_message(chunk, question)]
         tokens_for_chunk = count_message_tokens(messages, model)
-        print(
+        logger.info(
             f"Summarizing chunk {i + 1} / {len(chunks)} of length {len(chunk)} characters, or {tokens_for_chunk} tokens"
         )
@@ -117,7 +118,7 @@ def summarize_text(
             messages=messages,
         )
         summaries.append(summary)
-        print(
+        logger.info(
             f"Added chunk {i + 1} summary to memory, of length {len(summary)} characters"
         )
@@ -125,7 +126,7 @@ def summarize_text(
         memory.add(memory_to_add)
-    print(f"Summarized {len(chunks)} chunks.")
+    logger.info(f"Summarized {len(chunks)} chunks.")
     combined_summary = "\n".join(summaries)
     messages = [create_message(combined_summary, question)]

View File

@@ -119,7 +119,7 @@ def generate_aiconfig_manual() -> AIConfig:
         "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
         " multiple businesses autonomously'",
     )
-    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
+    logger.info("Enter nothing to load defaults, enter nothing when finished.")
     ai_goals = []
     for i in range(5):
         ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
@@ -139,7 +139,7 @@ def generate_aiconfig_manual() -> AIConfig:
         Fore.GREEN,
         "For example: $1.50",
     )
-    print("Enter nothing to let the AI run without monetary limit", flush=True)
+    logger.info("Enter nothing to let the AI run without monetary limit")
     api_budget_input = utils.clean_input(
         f"{Fore.LIGHTBLUE_EX}Budget{Style.RESET_ALL}: $"
     )

View File

@@ -69,6 +69,8 @@ class ElevenLabsSpeech(VoiceBase):
         Returns:
             bool: True if the request was successful, False otherwise
         """
+        from autogpt.logs import logger
         tts_url = (
             f"https://api.elevenlabs.io/v1/text-to-speech/{self._voices[voice_index]}"
         )
@@ -81,6 +83,6 @@ class ElevenLabsSpeech(VoiceBase):
             os.remove("speech.mpeg")
             return True
         else:
-            print("Request failed with status code:", response.status_code)
-            print("Response content:", response.content)
+            logger.warn("Request failed with status code:", response.status_code)
+            logger.info("Response content:", response.content)
             return False
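Note that the logger import here is deferred to call time: autogpt/logs.py itself imports say_text from autogpt.speech (see the logs diff above), so a module-level `from autogpt.logs import logger` inside the speech package would presumably create an import cycle.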

View File

@@ -5,27 +5,17 @@ import yaml
 from colorama import Fore
 from git.repo import Repo
+from autogpt.logs import logger
 # Use readline if available (for clean_input)
 try:
     import readline
-except:
+except ImportError:
     pass
 from autogpt.config import Config
-def send_chat_message_to_user(report: str):
-    cfg = Config()
-    if not cfg.chat_messages_enabled:
-        return
-    for plugin in cfg.plugins:
-        if not hasattr(plugin, "can_handle_report"):
-            continue
-        if not plugin.can_handle_report():
-            continue
-        plugin.report(report)
 def clean_input(prompt: str = "", talk=False):
     try:
         cfg = Config()
@@ -58,12 +48,12 @@ def clean_input(prompt: str = "", talk=False):
                 return plugin_response
         # ask for input, default when just pressing Enter is y
-        print("Asking user via keyboard...")
+        logger.info("Asking user via keyboard...")
         answer = input(prompt)
         return answer
     except KeyboardInterrupt:
-        print("You interrupted Auto-GPT")
-        print("Quitting...")
+        logger.info("You interrupted Auto-GPT")
+        logger.info("Quitting...")
         exit(0)

View File

@@ -10,11 +10,14 @@ cfg = Config()
 def configure_logging():
     logging.basicConfig(
-        filename="log-ingestion.txt",
-        filemode="a",
         format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
         datefmt="%H:%M:%S",
         level=logging.DEBUG,
+        handlers=[
+            logging.FileHandler(filename="log-ingestion.txt"),
+            logging.StreamHandler(),
+        ],
     )
     return logging.getLogger("AutoGPT-Ingestion")
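Replacing `filename=`/`filemode=` with an explicit `handlers` list makes the ingestion script log to the file and the console at once; `logging.FileHandler` appends by default, matching the old `filemode="a"`. A standalone sketch of the resulting setup, using the same names as the diff:

```python
import logging

logging.basicConfig(
    format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
    datefmt="%H:%M:%S",
    level=logging.DEBUG,
    handlers=[
        logging.FileHandler(filename="log-ingestion.txt"),  # persist to file (append mode)
        logging.StreamHandler(),  # echo to stderr
    ],
)
logger = logging.getLogger("AutoGPT-Ingestion")
logger.info("Ingestion logging configured")
```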
@@ -26,12 +29,13 @@ def ingest_directory(directory, memory, args):
     :param directory: The directory containing the files to ingest
     :param memory: An object with an add() method to store the chunks in memory
     """
+    global logger
     try:
         files = search_files(directory)
         for file in files:
             ingest_file(file, memory, args.max_length, args.overlap)
     except Exception as e:
-        print(f"Error while ingesting directory '{directory}': {str(e)}")
+        logger.error(f"Error while ingesting directory '{directory}': {str(e)}")
 def main() -> None:
@@ -69,24 +73,22 @@ def main() -> None:
     # Initialize memory
     memory = get_memory(cfg, init=args.init)
-    print("Using memory of type: " + memory.__class__.__name__)
+    logger.debug("Using memory of type: " + memory.__class__.__name__)
     if args.file:
         try:
             ingest_file(args.file, memory, args.max_length, args.overlap)
-            print(f"File '{args.file}' ingested successfully.")
+            logger.info(f"File '{args.file}' ingested successfully.")
         except Exception as e:
             logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
-            print(f"Error while ingesting file '{args.file}': {str(e)}")
     elif args.dir:
         try:
             ingest_directory(args.dir, memory, args)
-            print(f"Directory '{args.dir}' ingested successfully.")
+            logger.info(f"Directory '{args.dir}' ingested successfully.")
         except Exception as e:
             logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
-            print(f"Error while ingesting directory '{args.dir}': {str(e)}")
     else:
-        print(
+        logger.warn(
             "Please provide either a file path (--file) or a directory name (--dir)"
             " inside the auto_gpt_workspace directory as input."
         )