'Refactored by Sourcery' (#188)
@@ -39,7 +39,7 @@ class AI:
     def next(self, messages: list[dict[str, str]], prompt=None):
         if prompt:
-            messages = messages + [{"role": "user", "content": prompt}]
+            messages += [{"role": "user", "content": prompt}]

         logger.debug(f"Creating a new chat completion: {messages}")
         response = openai.ChatCompletion.create(
@@ -56,6 +56,6 @@ class AI:
             print(msg, end="")
             chat.append(msg)
         print()
-        messages = messages + [{"role": "assistant", "content": "".join(chat)}]
+        messages += [{"role": "assistant", "content": "".join(chat)}]
         logger.debug(f"Chat completion finished: {messages}")
         return messages
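One nuance of the two hunks above that the diff itself does not call out: on a list, `messages += [...]` extends the existing object in place, while `messages = messages + [...]` rebinds the name to a freshly built list. If the caller still holds a reference to the list it passed in, the refactored form mutates that list. A minimal, self-contained sketch of the difference (variable names here are illustrative, not taken from the repo):

# Illustrates += (in-place extend) vs. rebinding with + on a shared list.
caller_view = [{"role": "system", "content": "setup"}]

messages = caller_view
messages = messages + [{"role": "user", "content": "rebound"}]  # builds a new list
assert len(caller_view) == 1  # caller's list unchanged

messages = caller_view
messages += [{"role": "user", "content": "extended"}]  # extends the shared list
assert len(caller_view) == 2  # caller's list grew

print(len(caller_view))  # 2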
@@ -14,11 +14,10 @@ class DB:
     def __getitem__(self, key):
         full_path = self.path / key

-        if full_path.is_file():
-            with full_path.open("r", encoding="utf-8") as f:
-                return f.read()
-        else:
+        if not full_path.is_file():
             raise KeyError(key)
+        with full_path.open("r", encoding="utf-8") as f:
+            return f.read()

     def __setitem__(self, key, val):
         full_path = self.path / key
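For readability, this is how the refactored `DB.__getitem__` reads in context once the guard clause is applied. The `__init__` and `__setitem__` bodies below are assumptions added only to make the sketch self-contained and runnable; only `__getitem__` reflects the hunk above.

from pathlib import Path


class DB:
    """Sketch of a file-backed key/value store; surrounding structure assumed, not verbatim."""

    def __init__(self, path):
        self.path = Path(path).absolute()
        self.path.mkdir(parents=True, exist_ok=True)  # assumed setup

    def __getitem__(self, key):
        full_path = self.path / key

        # Guard clause: raise early, then read without an else branch.
        if not full_path.is_file():
            raise KeyError(key)
        with full_path.open("r", encoding="utf-8") as f:
            return f.read()

    def __setitem__(self, key, val):
        full_path = self.path / key
        full_path.write_text(val, encoding="utf-8")  # assumed write path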
@@ -33,8 +33,8 @@ def main(
     logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)

     input_path = Path(project_path).absolute()
-    memory_path = input_path / (run_prefix + "memory")
-    workspace_path = input_path / (run_prefix + "workspace")
+    memory_path = input_path / f"{run_prefix}memory"
+    workspace_path = input_path / f"{run_prefix}workspace"

     if delete_existing:
         # Delete files and subdirectories in paths
@@ -23,7 +23,7 @@ def main(
     benchmarks = []
     for bench_folder in folders:
         if os.path.isdir(bench_folder):
-            print("Running benchmark for {}".format(bench_folder))
+            print(f"Running benchmark for {bench_folder}")

             log_path = bench_folder / "log.txt"
             log_file = open(log_path, "w")
@@ -44,7 +44,7 @@ def main(
             benchmarks.append((bench_folder, process, log_file))

             print("You can stream the log file by running:")
-            print("tail -f {}".format(log_path))
+            print(f"tail -f {log_path}")
             print()

     for bench_folder, process, file in benchmarks:
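The last two hunks only swap `str.format` calls for equivalent f-strings; the printed output is identical. A quick standalone check (the path below is a placeholder, not taken from the benchmark script):

from pathlib import Path

log_path = Path("bench_example") / "log.txt"  # placeholder path

old_style = "tail -f {}".format(log_path)
new_style = f"tail -f {log_path}"

assert old_style == new_style
print(new_style)  # prints the same command either way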