make pre commit pass in the whole codebase (#149)

Patilla Code
2023-06-18 14:46:03 +02:00
committed by GitHub
parent c77b07a846
commit 084ce1759b
6 changed files with 74 additions and 61 deletions
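The commit brings every file in line with the repository's pre-commit hooks: long string literals are wrapped via implicit concatenation, single quotes become double quotes, duplicate imports are consolidated and reordered, and several hunks only adjust blank lines between functions. The hook configuration itself is not part of this diff; the snippet below is only a sketch of how the cleanup would be reproduced locally, assuming pre-commit is installed and a .pre-commit-config.yaml exists at the repository root.

    # Sketch (not part of the commit): run every configured hook on all files,
    # which is what "make pre commit pass in the whole codebase" amounts to.
    import subprocess
    import sys


    def run_all_hooks() -> int:
        """Invoke `pre-commit run --all-files` and return its exit code."""
        result = subprocess.run(["pre-commit", "run", "--all-files"], check=False)
        return result.returncode


    if __name__ == "__main__":
        sys.exit(run_all_hooks())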

View File

@@ -8,10 +8,12 @@ class AI:
try:
openai.Model.retrieve("gpt-4")
except openai.error.InvalidRequestError:
print("Model gpt-4 not available for provided api key reverting "
"to gpt-3.5.turbo. Sign up for the gpt-4 wait list here: "
"https://openai.com/waitlist/gpt-4-api")
self.kwargs['model'] = "gpt-3.5-turbo"
print(
"Model gpt-4 not available for provided api key reverting "
"to gpt-3.5.turbo. Sign up for the gpt-4 wait list here: "
"https://openai.com/waitlist/gpt-4-api"
)
self.kwargs["model"] = "gpt-3.5-turbo"
def start(self, system, user):
messages = [
@@ -26,10 +28,10 @@ class AI:
def fuser(self, msg):
return {"role": "user", "content": msg}
def fassistant(self, msg):
return {"role": "assistant", "content": msg}
def next(self, messages: list[dict[str, str]], prompt=None):
if prompt:
messages = messages + [{"role": "user", "content": prompt}]
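
In the second hunk of this file the message helpers are unchanged context: each returns an OpenAI-style chat message dict, and next() appends an optional user prompt before calling the model. A small usage sketch follows; the fsystem/fuser/next names come from the diff, while the constructor arguments are assumptions made only for illustration.

    # Usage sketch (constructor arguments are assumed, not shown in this diff).
    ai = AI(model="gpt-3.5-turbo", temperature=0.1)
    messages = [
        ai.fsystem("You are a careful software engineer."),
        ai.fuser("Summarise the main prompt."),
    ]
    # next() appends the prompt as a user message and asks the model for a reply.
    messages = ai.next(messages, prompt="Now list any open questions.")
    print(messages[-1]["content"])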

View File

@@ -18,7 +18,10 @@ def chat(
delete_existing: str = typer.Argument(None, help="delete existing files"),
run_prefix: str = typer.Option(
"",
help="run prefix, if you want to run multiple variants of the same project and later compare them",
help=(
"run prefix, if you want to run multiple variants of the same project and "
"later compare them",
),
),
model: str = "gpt-4",
temperature: float = 0.1,
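
The help text is wrapped as adjacent string literals, which Python concatenates at compile time. Note that, as reproduced here, the wrapped value keeps a trailing comma after the last literal, which turns the parenthesised expression into a one-element tuple rather than a plain string. The snippet below is a standalone demonstration of the difference, not code from the commit.

    # Demonstration: implicit concatenation versus an accidental one-element tuple.
    wrapped = (
        "run prefix, if you want to run multiple variants of the same project and "
        "later compare them"
    )
    assert isinstance(wrapped, str)  # adjacent literals merge into one string

    accidental = (
        "run prefix, if you want to run multiple variants of the same project and "
        "later compare them",
    )
    assert isinstance(accidental, tuple)  # the trailing comma makes a 1-tuple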

View File

@@ -2,9 +2,8 @@ import json
import subprocess
from gpt_engineer.ai import AI
from gpt_engineer.chat_to_files import to_files
from gpt_engineer.chat_to_files import parse_chat, to_files
from gpt_engineer.db import DBs
from gpt_engineer.chat_to_files import parse_chat
def setup_sys_prompt(dbs):
@@ -54,7 +53,8 @@ def clarify(ai: AI, dbs: DBs):
def gen_spec(ai: AI, dbs: DBs):
"""
Generate a spec from the main prompt + clarifications and save the results to the workspace
Generate a spec from the main prompt + clarifications and save the results to
the workspace
"""
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
@@ -67,6 +67,7 @@ def gen_spec(ai: AI, dbs: DBs):
return messages
def respec(ai: AI, dbs: DBs):
messages = dbs.logs[gen_spec.__name__]
messages += [ai.fsystem(dbs.identity["respec"])]
@@ -75,10 +76,13 @@ def respec(ai: AI, dbs: DBs):
messages = ai.next(
messages,
(
'Based on the conversation so far, please reiterate the specification for the program. '
'If there are things that can be improved, please incorporate the improvements. '
"If you are satisfied with the specification, just write out the specification word by word again."
)
"Based on the conversation so far, please reiterate the specification for "
"the program. "
"If there are things that can be improved, please incorporate the "
"improvements. "
"If you are satisfied with the specification, just write out the "
"specification word by word again."
),
)
dbs.memory["specification"] = messages[-1]["content"]
@@ -116,6 +120,7 @@ def gen_clarified_code(ai: AI, dbs: DBs):
to_files(messages[-1]["content"], dbs.workspace)
return messages
def gen_code(ai: AI, dbs: DBs):
# get the messages from previous step
@@ -157,8 +162,10 @@ def execute_entrypoint(ai, dbs):
def gen_entrypoint(ai, dbs):
messages = ai.start(
system=(
f"You will get information about a codebase that is currently on disk in the folder {dbs.workspace.path}.\n"
"From this you will answer with code blocks that includes all the necessary Windows, MacOS, and Linux terminal commands to "
"You will get information about a codebase that is currently on disk in "
f"the folder {dbs.workspace.path}.\n"
"From this you will answer with code blocks that includes all the necessary "
"Windows, MacOS, and Linux terminal commands to "
"a) install dependencies "
"b) run all necessary parts of the codebase (in parallell if necessary).\n"
"Do not install globally. Do not use sudo.\n"
@@ -170,11 +177,16 @@ def gen_entrypoint(ai, dbs):
blocks = parse_chat(messages[-1]["content"])
for lang, _ in blocks:
assert lang in ["", "bash", "sh"], "Generated entrypoint command that was not bash"
assert lang in [
"",
"bash",
"sh",
], "Generated entrypoint command that was not bash"
dbs.workspace["run.sh"] = "\n".join(block for lang, block in blocks)
return messages
def use_feedback(ai: AI, dbs: DBs):
messages = [
ai.fsystem(setup_sys_prompt(dbs)),
@@ -182,7 +194,7 @@ def use_feedback(ai: AI, dbs: DBs):
ai.fassistant(dbs.workspace["all_output.txt"]),
ai.fsystem(dbs.identity["use_feedback"]),
]
messages = ai.next(messages, dbs.memory['feedback'])
messages = ai.next(messages, dbs.memory["feedback"])
to_files(messages[-1]["content"], dbs.workspace)
return messages
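
The gen_entrypoint hunks rely on parse_chat returning a list of (language, code) pairs; only empty, bash, or sh blocks are accepted before being joined into run.sh. The snippet below replays that validation and join step on hand-written stand-in blocks, since the real model reply and the parse_chat implementation are not shown in this diff.

    # Sketch: the validation/join step from gen_entrypoint, with stand-in blocks
    # instead of a parsed model reply.
    blocks = [
        ("bash", "pip install -r requirements.txt"),
        ("sh", "python main.py"),
    ]

    for lang, _ in blocks:
        assert lang in ["", "bash", "sh"], "Generated entrypoint command that was not bash"

    run_sh = "\n".join(block for _, block in blocks)
    print(run_sh)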

View File

@@ -2,45 +2,47 @@
# for each folder, run the benchmark
import os
import sys
import subprocess
import time
import datetime
import shutil
import argparse
import json
from pathlib import Path
from typer import run
from itertools import islice
from pathlib import Path
from typer import run
def main(
n_benchmarks: int | None = None,
):
processes = []
files = []
path = Path('benchmark')
path = Path("benchmark")
if n_benchmarks:
benchmarks = islice(path.iterdir(), n_benchmarks)
for folder in benchmarks:
if os.path.isdir(folder):
print('Running benchmark for {}'.format(folder))
print("Running benchmark for {}".format(folder))
log_path = folder / 'log.txt'
log_file = open(log_path, 'w')
processes.append(subprocess.Popen(['python', '-m', 'gpt_engineer.main', folder], stdout=log_file, stderr=log_file, bufsize=0))
log_path = folder / "log.txt"
log_file = open(log_path, "w")
processes.append(
subprocess.Popen(
["python", "-m", "gpt_engineer.main", folder],
stdout=log_file,
stderr=log_file,
bufsize=0,
)
)
files.append(log_file)
print('You can stream the log file by running: tail -f {}'.format(log_path))
print("You can stream the log file by running: tail -f {}".format(log_path))
for process, file in zip(processes, files):
process.wait()
print('process finished with code', process.returncode)
print("process finished with code", process.returncode)
file.close()
if __name__ == '__main__':
if __name__ == "__main__":
run(main)
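
This script launches one gpt_engineer.main subprocess per benchmark folder, points stdout and stderr at a per-folder log.txt, and then waits on every process. A possible refinement, not part of the commit, is to manage the log handles with contextlib.ExitStack so they are closed even if one of the launches raises; a sketch assuming the same command line:

    # Sketch (not from the commit): the same fan-out/wait pattern with ExitStack.
    import subprocess
    from contextlib import ExitStack
    from pathlib import Path


    def run_benchmarks(folders: list[Path]) -> None:
        with ExitStack() as stack:
            processes = []
            for folder in folders:
                log_file = stack.enter_context(open(folder / "log.txt", "w"))
                processes.append(
                    subprocess.Popen(
                        ["python", "-m", "gpt_engineer.main", str(folder)],
                        stdout=log_file,
                        stderr=log_file,
                        bufsize=0,
                    )
                )
            for process in processes:
                process.wait()
                print("process finished with code", process.returncode)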

View File

@@ -2,26 +2,21 @@
# for each folder, run the benchmark
import os
import sys
import subprocess
import time
import datetime
import shutil
import argparse
import json
from pathlib import Path
from typer import run
from itertools import islice
def main(
):
benchmarks = Path('benchmark')
from pathlib import Path
from typer import run
def main():
benchmarks = Path("benchmark")
for benchmark in benchmarks.iterdir():
if benchmark.is_dir():
print(f'Cleaning {benchmark}')
print(f"Cleaning {benchmark}")
for path in benchmark.iterdir():
if path.name == 'main_prompt':
if path.name == "main_prompt":
continue
# Get filename of Path object
@@ -32,7 +27,6 @@ def main(
# delete the file
os.remove(path)
if __name__ == '__main__':
run(main)
if __name__ == "__main__":
run(main)

View File

@@ -3,12 +3,12 @@ from gpt_engineer.db import DB
def test_db():
# use /tmp for testing
db = DB('/tmp/test_db')
db['test'] = 'test'
assert db['test'] == 'test'
db['test'] = 'test2'
assert db['test'] == 'test2'
db['test2'] = 'test2'
assert db['test2'] == 'test2'
assert db['test'] == 'test2'
print('test_db passed')
db = DB("/tmp/test_db")
db["test"] = "test"
assert db["test"] == "test"
db["test"] = "test2"
assert db["test"] == "test2"
db["test2"] = "test2"
assert db["test2"] == "test2"
assert db["test"] == "test2"
print("test_db passed")