Mirror of https://github.com/aljazceru/gpt-engineer.git, synced 2025-12-17 12:45:26 +01:00
Langchain integration (#512)
* Added LangChain integration
* Fixed issue created by git check-in process
* Added ':' to characters to remove from end of file path
* Tested initial migration to LangChain; removed comments and logging used for debugging
* Converted camelCase to snake_case
* Turns out we need the exception handling
* Testing Hugging Face integrations via LangChain
* Added LangChain loadable models
* Renamed the "qa" prompt to "clarify", since it is used in the "clarify" step to ask for clarification
* Fixed loading model YAML files
* Fixed streaming
* Added modeldir CLI option
* Fixed typing
* Fixed interaction with token logging
* Fixed spelling, dependency issues, and typing
* Fixed spelling and tests
* Removed unneeded logging which caused a test to fail
* Cleaned up code
* Incorporated feedback:
  - deleted unnecessary functions and logger.info calls
  - used a LangChain chat model instead of an LLM to naturally communicate with gpt-4
  - deleted loading models from YAML files, as LangChain doesn't offer this for chat models
* Update gpt_engineer/steps.py
* Incorporated feedback:
  - fixed a failing test
  - removed parsing complexity by using # type: ignore
  - replaced every occurrence of ai.last_message_content with its content
* Fixed test
* Update gpt_engineer/steps.py

Co-authored-by: H <holden.robbins@gmail.com>
Co-authored-by: Anton Osika <anton.osika@gmail.com>
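The central change described above is swapping direct OpenAI calls for a LangChain chat model with streaming. The PR's exact wiring lives in gpt_engineer's own modules; the snippet below is only a minimal sketch of the pattern, assuming a 2023-era langchain release (the ChatOpenAI, StreamingStdOutCallbackHandler, and message-schema imports shown here):

```python
from langchain.chat_models import ChatOpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.schema import HumanMessage, SystemMessage

# A chat model keeps message roles intact, unlike a plain-text LLM wrapper.
chat = ChatOpenAI(
    model_name="gpt-4",
    temperature=0.1,
    streaming=True,                                # emit tokens as they arrive
    callbacks=[StreamingStdOutCallbackHandler()],  # print streamed tokens to stdout
)

messages = [
    SystemMessage(content="You are a helpful coding assistant."),
    HumanMessage(content="Summarize what gpt-engineer does in one sentence."),
]
response = chat(messages)  # returns an AIMessage; its .content holds the reply
```

This also explains the "deleted loading models from YAML files" item: per the commit message, LangChain offered config-file loading for LLMs but not for chat models at the time.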
@@ -100,11 +100,11 @@ def check_consent():
     path = Path(".gpte_consent")
     if path.exists() and path.read_text() == "true":
         return
-    ans = input("Is it ok if we store your prompts to learn? (y/n)")
-    while ans.lower() not in ("y", "n"):
-        ans = input("Invalid input. Please enter y or n: ")
+    answer = input("Is it ok if we store your prompts to learn? (y/n)")
+    while answer.lower() not in ("y", "n"):
+        answer = input("Invalid input. Please enter y or n: ")
 
-    if ans.lower() == "y":
+    if answer.lower() == "y":
         path.write_text("true")
         print(colored("Thank you", "light_green"))
         print()
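The first hunk is a pure rename of the local variable ans to answer; behavior is unchanged. Stitched together from the context and added lines, the resulting function reads as follows (the Path and colored imports are assumed from the surrounding file, colored presumably from termcolor):

```python
from pathlib import Path

from termcolor import colored  # assumed source of `colored` in this file


def check_consent():
    path = Path(".gpte_consent")
    if path.exists() and path.read_text() == "true":
        return  # consent already recorded on disk
    answer = input("Is it ok if we store your prompts to learn? (y/n)")
    while answer.lower() not in ("y", "n"):
        answer = input("Invalid input. Please enter y or n: ")

    if answer.lower() == "y":
        path.write_text("true")
        print(colored("Thank you", "light_green"))
        print()
```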
@@ -153,21 +153,14 @@ def ask_if_can_store() -> bool:
     return can_store == "y"
 
 
-def logs_to_string(steps: List[Step], logs: DB):
+def logs_to_string(steps: List[Step], logs: DB) -> str:
     chunks = []
     for step in steps:
         chunks.append(f"--- {step.__name__} ---\n")
-        messages = json.loads(logs[step.__name__])
-        chunks.append(format_messages(messages))
+        chunks.append(logs[step.__name__])
     return "\n".join(chunks)
 
 
-def format_messages(messages: List[dict]) -> str:
-    return "\n".join(
-        [f"{message['role']}:\n\n{message['content']}" for message in messages]
-    )
-
-
 def extract_learning(
     model: str, temperature: float, steps: List[Step], dbs: DBs, steps_file_hash
 ) -> Learning:
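The second hunk drops the json.loads round-trip and deletes the format_messages helper: with LangChain in place, the step logs are evidently stored as already-formatted text rather than JSON message lists, so logs_to_string can concatenate them directly. Below is a self-contained sketch of the new behavior, with hypothetical stand-ins (FakeDB, the dummy clarify step) for the project's real Step and DB types:

```python
from typing import Callable, List

Step = Callable  # stand-in: steps are plain functions, identified by __name__


class FakeDB(dict):
    """Hypothetical dict-backed stand-in for gpt_engineer's on-disk DB."""


def logs_to_string(steps: List[Step], logs: FakeDB) -> str:
    chunks = []
    for step in steps:
        chunks.append(f"--- {step.__name__} ---\n")
        chunks.append(logs[step.__name__])  # logs already hold formatted text
    return "\n".join(chunks)


def clarify():  # dummy step; only its __name__ matters here
    pass


logs = FakeDB(clarify="user:\n\nBuild a CLI todo app")
print(logs_to_string([clarify], logs))
```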