Mirror of https://github.com/aljazceru/Auto-GPT.git (synced 2025-12-18 06:24:20 +01:00)

Commit: Merge branch 'Torantulino:master' into kinance-resolve-debug-config-conflict
@@ -127,6 +127,8 @@ To use the `google_official_search` command, you need to set up your Google API

8. Set up your search engine by following the prompts. You can choose to search the entire web or specific sites.
9. Once you've created your search engine, click on "Control Panel" and then "Basics". Copy the "Search engine ID" and set it as an environment variable named `CUSTOM_SEARCH_ENGINE_ID` on your machine. See setting up environment variables below.

*Remember that your free daily custom search quota allows only up to 100 searches. To increase this limit, assign a billing account to the project, which raises the cap to 10K daily searches.*

### Setting up environment variables

For Windows Users:

```
setx GOOGLE_API_KEY "YOUR_GOOGLE_API_KEY"
setx CUSTOM_SEARCH_ENGINE_ID "YOUR_CUSTOM_SEARCH_ENGINE_ID"
```
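For reference, a minimal sketch of how a script might consume these variables (the variable names come from the README; the fail-fast check itself is illustrative, not the repo's actual startup code):

```python
import os

# Both values are read from the environment, as set up above.
google_api_key = os.getenv("GOOGLE_API_KEY")
custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

if not google_api_key or not custom_search_engine_id:
    # Fail fast with a clear message rather than erroring mid-search.
    raise SystemExit("Set GOOGLE_API_KEY and CUSTOM_SEARCH_ENGINE_ID first.")
```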
@@ -3,9 +3,27 @@ import data
 import os

 class AIConfig:
-    """Class to store the AI's name, role, and goals."""
-    def __init__(self, ai_name="", ai_role="", ai_goals=[]):
-        """Initialize the AIConfig class"""
+    """
+    A class object that contains the configuration information for the AI
+
+    Attributes:
+        ai_name (str): The name of the AI.
+        ai_role (str): The description of the AI's role.
+        ai_goals (list): The list of objectives the AI is supposed to complete.
+    """
+
+    def __init__(self, ai_name: str="", ai_role: str="", ai_goals: list=[]) -> None:
+        """
+        Initialize a class instance
+
+        Parameters:
+            ai_name (str): The name of the AI.
+            ai_role (str): The description of the AI's role.
+            ai_goals (list): The list of objectives the AI is supposed to complete.
+        Returns:
+            None
+        """
+
         self.ai_name = ai_name
         self.ai_role = ai_role
         self.ai_goals = ai_goals
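One caveat worth flagging in the new signature: `ai_goals: list=[]` is a mutable default, which Python evaluates once at definition time, so the same list object is shared across every call that omits the argument. A minimal sketch of the usual safer idiom (an editorial suggestion, not what the diff does):

```python
class AIConfigSafe:
    """Variant of AIConfig that avoids sharing one list across instances."""

    def __init__(self, ai_name: str = "", ai_role: str = "", ai_goals: list = None) -> None:
        self.ai_name = ai_name
        self.ai_role = ai_role
        # Create a fresh list per instance; [] as a default would be shared.
        self.ai_goals = ai_goals if ai_goals is not None else []
```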
@@ -14,8 +32,19 @@ class AIConfig:
     SAVE_FILE = os.path.join(os.path.dirname(__file__), '..', 'ai_settings.yaml')

     @classmethod
-    def load(cls, config_file=SAVE_FILE):
-        """Load variables from yaml file if it exists, otherwise use defaults."""
+    def load(cls: object, config_file: str=SAVE_FILE) -> object:
+        """
+        Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from yaml file if yaml file exists,
+        else returns class with no parameters.
+
+        Parameters:
+            cls (class object): An AIConfig Class object.
+            config_file (str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
+
+        Returns:
+            cls (object): An instance of the given cls object
+        """
+
         try:
             with open(config_file) as file:
                 config_params = yaml.load(file, Loader=yaml.FullLoader)
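For context, `load` expects the same keys that `save` writes further down; a plausible `ai_settings.yaml` (values illustrative) and the corresponding call:

```python
# ai_settings.yaml (illustrative contents):
#   ai_name: Entrepreneur-GPT
#   ai_role: an AI designed to autonomously run a business
#   ai_goals:
#   - Increase net worth
#   - Develop and manage a business

config = AIConfig.load()   # returns an empty AIConfig if the file is missing
print(config.ai_name)      # Entrepreneur-GPT
```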
@@ -28,15 +57,32 @@ class AIConfig:
         return cls(ai_name, ai_role, ai_goals)

-    def save(self, config_file=SAVE_FILE):
-        """Save variables to yaml file."""
+    def save(self, config_file: str=SAVE_FILE) -> None:
+        """
+        Saves the class parameters to the specified yaml file path as a yaml file.
+
+        Parameters:
+            config_file (str): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
+
+        Returns:
+            None
+        """
+
         config = {"ai_name": self.ai_name, "ai_role": self.ai_role, "ai_goals": self.ai_goals}
         with open(config_file, "w") as file:
             yaml.dump(config, file)

-    def construct_full_prompt(self):
-        """Construct the full prompt for the AI to use."""
+    def construct_full_prompt(self) -> str:
+        """
+        Returns a prompt to the user with the class information in an organized fashion.
+
+        Parameters:
+            None
+
+        Returns:
+            full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
+        """
+
         prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

         # Construct full prompt
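A round trip through `save` and `load` using the keys shown in the `config` dict above (the file path defaults to `ai_settings.yaml` one directory above the module; the name, role, and goal strings are hypothetical):

```python
config = AIConfig("Entrepreneur-GPT",
                  "an AI designed to autonomously run a business",
                  ["Increase net worth"])
config.save()               # yaml.dump writes the three fields
restored = AIConfig.load()  # yaml.load reads them back
assert restored.ai_goals == ["Increase net worth"]
```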
@@ -46,3 +92,4 @@ class AIConfig:

         full_prompt += f"\n\n{data.load_prompt()}"
         return full_prompt
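Downstream, the assembled prompt is what gets handed to the chat loop; a quick illustration of calling it (the output shape is inferred from the hunks above, since the middle of the method is not shown in this diff):

```python
config = AIConfig.load()
full_prompt = config.construct_full_prompt()
# Begins with the fixed prompt_start text and ends with data.load_prompt();
# the name/role/goals formatting in between is elided from this diff.
print(full_prompt)
```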
@@ -5,9 +5,17 @@ from call_ai_function import call_ai_function
 from json_parser import fix_and_parse_json
 cfg = Config()

 # Evaluating code

 def evaluate_code(code: str) -> List[str]:
-    """Evaluates the given code and returns a list of suggestions for improvements."""
+    """
+    A function that takes in a string and returns a response from create chat completion api call.
+
+    Parameters:
+        code (str): Code to be evaluated.
+    Returns:
+        A result string from create chat completion. A list of suggestions to improve the code.
+    """
+
     function_string = "def analyze_code(code: str) -> List[str]:"
     args = [code]
     description_string = """Analyzes the given code and returns a list of suggestions for improvements."""
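All three helpers in this file funnel their `function_string`/`args`/`description_string` triple into `call_ai_function`, whose implementation is not part of this diff. A minimal sketch of the pattern those triples imply (`create_chat_completion` and `cfg` are assumed to be the repo's chat wrapper and config object):

```python
def call_ai_function_sketch(function_string: str, args: list, description_string: str) -> str:
    # Pose as the named Python function and ask the model to reply with
    # nothing but that function's return value.
    arg_str = ", ".join(str(arg) for arg in args)
    messages = [
        {
            "role": "system",
            "content": f"You are now the following python function: "
                       f"```# {description_string}\n{function_string}```\n\n"
                       f"Only respond with your `return` value.",
        },
        {"role": "user", "content": arg_str},
    ]
    # create_chat_completion is assumed here; it wraps the OpenAI chat API.
    return create_chat_completion(messages, model=cfg.fast_llm_model)
```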
@@ -17,9 +25,17 @@ def evaluate_code(code: str) -> List[str]:
     return result_string


 # Improving code
 def improve_code(suggestions: List[str], code: str) -> str:
-    """Improves the provided code based on the suggestions provided, making no other changes."""
+    """
+    A function that takes in code and suggestions and returns a response from create chat completion api call.
+
+    Parameters:
+        suggestions (List): A list of suggestions around what needs to be improved.
+        code (str): Code to be improved.
+    Returns:
+        A result string from create chat completion. Improved code in response.
+    """
+
     function_string = (
         "def generate_improved_code(suggestions: List[str], code: str) -> str:"
     )
@@ -30,9 +46,18 @@ def improve_code(suggestions: List[str], code: str) -> str:
     return result_string


 # Writing tests

 def write_tests(code: str, focus: List[str]) -> str:
-    """Generates test cases for the existing code, focusing on specific areas if required."""
+    """
+    A function that takes in code and focus topics and returns a response from create chat completion api call.
+
+    Parameters:
+        focus (List): A list of areas to focus the test cases on.
+        code (str): Code for test cases to be generated against.
+    Returns:
+        A result string from create chat completion. Test cases for the submitted code in response.
+    """
+
     function_string = (
         "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
     )
@@ -41,5 +66,3 @@ def write_tests(code: str, focus: List[str]) -> str:

     result_string = call_ai_function(function_string, args, description_string)
     return result_string
-
-
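Taken together, the three helpers can be chained; a hypothetical session (the returned strings are whatever the model produces, so the comments describe intent rather than guaranteed output):

```python
code_snippet = "def add(a, b):\n    return a + b"

suggestions = evaluate_code(code_snippet)            # suggestions for improvements
improved = improve_code(suggestions, code_snippet)   # rewritten code as a string
tests = write_tests(code_snippet, focus=["edge cases"])
print(tests)
```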
@@ -64,14 +64,14 @@ def chat_with_ai(
             model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
             # Reserve 1000 tokens for the response

-            if cfg.debug_mode:
+            if cfg.debug:
                 print(f"Token limit: {token_limit}")

             send_token_limit = token_limit - 1000

             relevant_memory = permanent_memory.get_relevant(str(full_message_history[-5:]), 10)

-            if cfg.debug_mode:
+            if cfg.debug:
                 print('Memory Stats: ', permanent_memory.get_stats())

             next_message_to_add_index, current_tokens_used, insertion_index, current_context = generate_context(
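The budget arithmetic behind `send_token_limit` is worth making explicit. A standalone sketch, with `count_message_tokens` standing in for whatever tokenizer the repo uses (the variable names mirror the hunk; the loop body is illustrative):

```python
token_limit = 4096                        # context window of the chosen model
send_token_limit = token_limit - 1000     # reserve 1000 tokens for the reply

current_tokens_used = 0
current_context = []
for message in reversed(full_message_history):   # walk newest messages first
    cost = count_message_tokens(message)         # hypothetical token counter
    if current_tokens_used + cost > send_token_limit:
        break                                    # prompt budget exhausted
    current_context.insert(0, message)           # keep chronological order
    current_tokens_used += cost

tokens_remaining = token_limit - current_tokens_used  # left for the response
```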
@@ -110,7 +110,7 @@ def chat_with_ai(
             # assert tokens_remaining >= 0, "Tokens remaining is negative. This should never happen, please submit a bug report at https://www.github.com/Torantulino/Auto-GPT"

             # Debug print the current context
-            if cfg.debug_mode:
+            if cfg.debug:
                 print(f"Token limit: {token_limit}")
                 print(f"Send Token Count: {current_tokens_used}")
                 print(f"Tokens remaining for response: {tokens_remaining}")
@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug_mode:
+    if cfg.debug:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")
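The three hunks above all make the same one-line change, settling the merge conflict on a single flag name so every debug check reads the same Config attribute. A minimal sketch of what that implies (the Config internals here are assumed, not taken from the repo):

```python
class Config:
    def __init__(self):
        # One canonical attribute name; the merge switches all
        # `if cfg.debug_mode:` checks over to `if cfg.debug:`.
        self.debug = False

cfg = Config()
if cfg.debug:
    print("debug output enabled")
```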
@@ -17,6 +17,8 @@ import traceback
 import yaml
+import argparse

 cfg = Config()

 def check_openai_api_key():
+    """Check if the OpenAI API key is set in config.py or as an environment variable."""
     if not cfg.openai_api_key:
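The hunk is truncated after the `if`. A typical body for such a startup check prints an error and aborts; this completion is an assumption for illustration, not the diff's actual code:

```python
import sys

def check_openai_api_key():
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    if not cfg.openai_api_key:
        # Assumed behavior: tell the user how to fix it, then abort startup.
        print("Please set your OpenAI API key in config.py or as the "
              "OPENAI_API_KEY environment variable.")
        sys.exit(1)
```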