Merge pull request #52 from patillacode/minor-clean-up

Minor clean up
Anton Osika (committed via GitHub), 2023-06-15 20:20:51 +02:00
9 changed files with 133 additions and 43 deletions

.gitignore (32 lines changed)

@@ -1 +1,33 @@
+# See https://help.github.com/ignore-files/ for more about ignoring files.
+
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# Distribution / packaging
+dist/
+build/
+*.egg-info/
+*.egg
+
+# Virtual environments
+venv/
+ENV/
+
+# IDE-specific files
+.vscode/
+
+# Compiled Python modules
+*.pyc
+*.pyo
+*.pyd
+
+# macOS specific files
+.DS_Store
+
+# Windows specific files
+Thumbs.db
+
+# this application's specific files
+archive
 archive

ai.py (15 lines changed)

@@ -1,4 +1,3 @@
 import openai

@@ -8,15 +7,15 @@ class AI:
     def start(self, system, user):
         messages = [
             {"role": "system", "content": system},
             {"role": "user", "content": user},
         ]
         return self.next(messages)

     def fsystem(self, msg):
         return {"role": "system", "content": msg}

     def fuser(self, msg):
         return {"role": "user", "content": msg}

@@ -25,9 +24,7 @@ class AI:
         messages = messages + [{"role": "user", "content": prompt}]
         response = openai.ChatCompletion.create(
-            messages=messages,
-            stream=True,
-            **self.kwargs
+            messages=messages, stream=True, **self.kwargs
         )
         chat = []

@@ -36,4 +33,4 @@ class AI:
             msg = delta.get('content', '')
             print(msg, end="")
             chat.append(msg)
         return messages + [{"role": "assistant", "content": "".join(chat)}]
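
For reference, the streaming pattern this class wraps looks roughly like this under openai==0.27.8 (as pinned in requirements.txt below). The model name and temperature here are stand-ins for whatever AI carries in self.kwargs; treat them as assumptions, not the project's exact call:

    import openai

    # Stream a chat completion token-by-token (openai 0.27.x API)
    response = openai.ChatCompletion.create(
        model="gpt-4",      # assumed; AI passes these via **self.kwargs
        temperature=0.1,    # assumed
        messages=[{"role": "user", "content": "Say hello"}],
        stream=True,
    )

    chat = []
    for chunk in response:
        delta = chunk["choices"][0]["delta"]  # incremental payload per chunk
        msg = delta.get("content", "")        # role-only chunks carry no content
        print(msg, end="")
        chat.append(msg)

    reply = "".join(chat)  # the full assistant message, as ai.py assembles it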

chat_to_files.py

@@ -1,7 +1,7 @@
 import re

-def parse_chat(chat):# -> List[Tuple[str, str]]:
+def parse_chat(chat):  # -> List[Tuple[str, str]]:
     # Get all ``` blocks
     regex = r"```(.*?)```"
@@ -15,7 +15,7 @@ def parse_chat(chat):  # -> List[Tuple[str, str]]:
         code = "\n".join(code)

         # Add the file to the list
         files.append((path, code))

     return files
@@ -24,4 +24,4 @@ def to_files(chat, workspace):
     files = parse_chat(chat)
     for file_name, file_content in files:
         workspace[file_name] = file_content
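
In plain terms, parse_chat scans the model's reply for ``` fenced blocks and pairs each one with a file path, and to_files then assigns each pair into the workspace mapping. The path extraction in the middle of the function is elided in the hunks above, so the sketch below rests on an assumed convention (first line of each block names the file) rather than the project's exact code:

    import re

    def parse_chat_sketch(chat):  # -> list[tuple[str, str]]
        # Non-greedy match so each ``` ... ``` block is captured separately;
        # DOTALL lets a match span newlines
        blocks = re.findall(r"```(.*?)```", chat, re.DOTALL)
        files = []
        for block in blocks:
            lines = block.strip().splitlines()
            path = lines[0].strip()        # assumed: first line is the path
            code = "\n".join(lines[1:])    # rest of the block is the code
            files.append((path, code))
        return files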

db.py (4 lines changed)

@@ -1,6 +1,6 @@
+import os
+
 from dataclasses import dataclass
-import os
 from pathlib import Path
@@ -25,4 +25,4 @@ class DBs:
     logs: DB
     identity: DB
     input: DB
     workspace: DB
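
For orientation, DB behaves like a dict backed by a folder: reading a key loads that file, assigning a key writes it, which is why to_files in chat_to_files.py can simply assign into dbs.workspace. The class body is not shown in this diff, so the sketch below is an assumption about that contract, not the real implementation:

    from pathlib import Path

    class DBSketch:
        """Hedged stand-in for db.DB: a folder acting as a str -> str dict."""

        def __init__(self, path):
            self.path = Path(path).absolute()
            self.path.mkdir(parents=True, exist_ok=True)

        def __getitem__(self, key):
            return (self.path / key).read_text()   # reading a key loads the file

        def __setitem__(self, key, val):
            (self.path / key).write_text(val)      # assigning a key writes the file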

main.py (18 lines changed)

@@ -1,14 +1,11 @@
 import json
-import os
 import pathlib
-from typing import Optional
-import openai
-from chat_to_files import to_files
-from ai import AI
-from steps import STEPS
-from db import DB, DBs
+
 import typer
+
+from ai import AI
+from db import DB, DBs
+from steps import STEPS

 app = typer.Typer()
@@ -16,11 +13,13 @@ app = typer.Typer()
 @app.command()
 def chat(
     project_path: str = typer.Argument(None, help="path"),
-    run_prefix: str = typer.Option("", help="run prefix, if you want to run multiple variants of the same project and later compare them"),
+    run_prefix: str = typer.Option(
+        "",
+        help="run prefix, if you want to run multiple variants of the same project and later compare them",
+    ),
     model: str = "gpt-4",
     temperature: float = 0.1,
 ):
     if project_path is None:
         project_path = str(pathlib.Path(__file__).parent / "example")
@@ -41,7 +40,6 @@ def chat(
         identity=DB(pathlib.Path(__file__).parent / "identity"),
     )

     for step in STEPS:
         messages = step(ai, dbs)
         dbs.logs[step.__name__] = json.dumps(messages)
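
The three-line loop at the end is the whole execution model: each step receives the shared AI client and databases, and its full message transcript is persisted under the step's own function name. That log key is what lets run_clarified in steps.py reload the clarify transcript later. As a minimal sketch of the pattern (dbs.logs behaves like a dict of strings, per db.py):

    import json

    def run_steps(ai, dbs, steps):
        for step in steps:
            messages = step(ai, dbs)    # each step returns its message list
            # persist a replayable transcript under the step's name
            dbs.logs[step.__name__] = json.dumps(messages)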

pyproject.toml (new file, 59 lines)

@@ -0,0 +1,59 @@
# https://beta.ruff.rs/docs/configuration/#using-rufftoml
[tool.ruff]
select = ["F", "E", "W", "I001"]
line-length = 90
show-fixes = false
target-version = "py311"
task-tags = ["TODO", "FIXME"]
exclude = [
".bzr",
".direnv",
".eggs",
".git",
".ruff_cache",
".svn",
".tox",
".venv",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"venv",
]

[tool.ruff.isort]
known-first-party = []
known-third-party = []
section-order = [
"future",
"standard-library",
"third-party",
"first-party",
"local-folder",
]
combine-as-imports = true
split-on-trailing-comma = false
lines-between-types = 1

# https://black.readthedocs.io/en/stable/usage_and_configuration/the_basics.html
[tool.black]
line-length = 90
target-version = ["py311"]
include = '\.pyi?$'
exclude = '''
(
/(
\.direnv
| \.eggs
| \.git
| \.tox
| \.venv
| _build
| build
| dist
| venv
)/
)
'''
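
These isort settings are what drive the import reshuffles in main.py, db.py, and steps.py: sections are ordered stdlib, then third-party, then first-party, and lines-between-types = 1 puts one blank line between plain "import x" lines and "from x import y" lines within a section. db.py's stdlib block after the clean-up (per the hunk above) shows the type split; main.py's reordering shows the section ordering:

    import os

    from dataclasses import dataclass
    from pathlib import Path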

requirements.txt

@@ -1,2 +1,4 @@
-openai
-typer
+black==23.3.0
+openai==0.27.8
+ruff==0.0.272
+typer==0.9.0

(file name not captured in this view)

@@ -17,7 +17,6 @@ def chat(
     temperature: float = 0.1,
     max_tokens: int = 4096,
 ):
-
     ai = AI(
         model=model,
         temperature=temperature,

steps.py

@@ -1,20 +1,28 @@
+import json
+
 from ai import AI
 from chat_to_files import to_files
 from db import DBs
-import json


 def setup_sys_prompt(dbs):
     return dbs.identity['setup'] + '\nUseful to know:\n' + dbs.identity['philosophy']


 def run(ai: AI, dbs: DBs):
     '''Run the AI on the main prompt and save the results'''
-    messages = ai.start(setup_sys_prompt(dbs), dbs.input['main_prompt'])
+    messages = ai.start(
+        setup_sys_prompt(dbs),
+        dbs.input['main_prompt'],
+    )
     to_files(messages[-1]['content'], dbs.workspace)
     return messages


 def clarify(ai: AI, dbs: DBs):
-    '''Ask the user if they want to clarify anything and save the results to the workspace'''
+    '''
+    Ask the user if they want to clarify anything and save the results to the workspace
+    '''
     messages = [ai.fsystem(dbs.identity['qa'])]
     user = dbs.input['main_prompt']

     while True:
@@ -31,35 +39,30 @@ def clarify(ai: AI, dbs: DBs):
             break

         user += (
             '\n\n'
             'Is anything else unclear? If yes, only answer in the form:\n'
             '{remaining unclear areas} remaining questions.\n'
             '{Next question}\n'
             'If everything is sufficiently clear, only answer "no".'
         )

     print()
     return messages


 def run_clarified(ai: AI, dbs: DBs):
     # get the messages from previous step
     messages = json.loads(dbs.logs[clarify.__name__])
-    messages = (
-        [
-            ai.fsystem(setup_sys_prompt(dbs)),
-        ] +
-        messages[1:]
-    )
+    messages = [
+        ai.fsystem(setup_sys_prompt(dbs)),
+    ] + messages[1:]
     messages = ai.next(messages, dbs.identity['use_qa'])
     to_files(messages[-1]['content'], dbs.workspace)
     return messages


-STEPS=[
-    clarify,
-    run_clarified
-]
+STEPS = [clarify, run_clarified]

 # Future steps that can be added:
 # improve_files,
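
The run_clarified rewrite is the clearest view of how the steps chain together: it reloads the transcript that the clarify step logged (via the loop in main.py), swaps the QA system message for the full setup prompt, and lets the model take one more turn before writing files. Annotated, the refactored body reads:

    import json

    def run_clarified(ai: AI, dbs: DBs):
        # reload the messages that main.py logged under clarify's name
        messages = json.loads(dbs.logs[clarify.__name__])
        # keep the clarified Q&A, but replace the QA identity (message 0)
        # with the full setup + philosophy prompt
        messages = [
            ai.fsystem(setup_sys_prompt(dbs)),
        ] + messages[1:]
        # one more model turn, steered by the 'use_qa' identity prompt
        messages = ai.next(messages, dbs.identity['use_qa'])
        # write any fenced code blocks from the final reply into the workspace
        to_files(messages[-1]['content'], dbs.workspace)
        return messages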