mirror of
https://github.com/aljazceru/gpt-engineer.git
synced 2025-12-17 12:45:26 +01:00
* 'main' of github.com:AntonOsika/gpt-engineer: Mark test as failed because it requires OpenAI API access currently `black` Create test_ai.py fix to_files execute_workspace -> gen_entrypoint; execute_entrypoint Ignore my-new-project/ Added CODE_OF_CONDUCT.md to the .github directory (#147) make pre commit pass in the whole codebase (#149) Create ci.yaml Fix linting Add support for directory paths in filenames and improve code splitting - Enforce an explicit markdown code block format - Add a token to split the output to clearly detect when the code blocks start - Save all non-code output to a `README.md` file - Update RegEx to extract and strip text more reliably and clean up the output - Update the identify prompts appropriately Enhance philosophy to include supporting documents - Create instructions for running/compiling the project - Create any package manager files Generate instructions for all platforms - Update prompt to create instructions for all 3 major OS platforms - Fix small typo Add support for directory creation and binary files - Use the `Path` module instead of `os` - Add ability to create any amount of missing directories for a given file - Add ability to save both text and binary files to save images (or other file types) later Add cleanup & move `projects` to their own directory - Add optional argument to clean and delete the working directories of the project before running the prompt - Add `.gitignore` entry to ignore all possible projects - Update readme
68 lines
1.8 KiB
Python
68 lines
1.8 KiB
Python
# list all folders in benchmark folder
|
|
# for each folder, run the benchmark
|
|
|
|
import os
|
|
import subprocess
|
|
|
|
from itertools import islice
|
|
from pathlib import Path
|
|
|
|
from typer import run
|
|
|
|
|
|
def main(
    n_benchmarks: int | None = None,
):
    """Run every benchmark project found under the ``benchmark/`` directory.

    Each project folder is launched in parallel via
    ``python -m gpt_engineer.main <folder> --steps-config benchmark``, with
    stdout/stderr streamed to a per-folder ``log.txt``.  Once all runs have
    finished, each generated project is executed interactively
    (``--steps-config execute_only``) so the results can be inspected.

    Args:
        n_benchmarks: If given, only the first ``n_benchmarks`` folders
            (in directory-listing order) are run.
    """
    path = Path("benchmark")
    folders = path.iterdir()

    if n_benchmarks:
        folders = islice(folders, n_benchmarks)

    benchmarks = []
    for bench_folder in folders:
        # Skip stray files (e.g. a README) that may sit next to the projects.
        if not bench_folder.is_dir():
            continue

        print(f"Running benchmark for {bench_folder}")

        log_path = bench_folder / "log.txt"
        # Kept open for the lifetime of the subprocess; closed in the
        # wait loop below once the process exits.
        log_file = open(log_path, "w")
        process = subprocess.Popen(
            [
                "python",
                "-u",  # Unbuffered output so the log updates in real time
                "-m",
                "gpt_engineer.main",
                bench_folder,
                "--steps-config",
                "benchmark",
            ],
            stdout=log_file,
            stderr=log_file,
            bufsize=0,
        )
        benchmarks.append((bench_folder, process, log_file))

        print(f"You can stream the log file by running: tail -f {log_path}")

    for bench_folder, process, log_file in benchmarks:
        process.wait()
        log_file.close()

        print("process", bench_folder.name, "finished with code", process.returncode)
        print("Running it. Original benchmark prompt:")
        print()
        with open(bench_folder / "main_prompt") as f:
            print(f.read())
        print()

        try:
            subprocess.run(
                [
                    "python",
                    "-m",
                    "gpt_engineer.main",
                    bench_folder,
                    "--steps-config",
                    "execute_only",
                ],
            )
        except KeyboardInterrupt:
            # Ctrl-C skips ahead to the next benchmark instead of aborting.
            pass
|
|
|
|
# Entry point: typer exposes `main` as a CLI (option: --n-benchmarks).
if __name__ == "__main__":
    run(main)
|