gpt-engineer/tests/test_collect.py
UmerHA 19a4c10b6e Langchain integration (#512)
* Added LangChain integration

* Fixed issue created by git checkin process

* Added ':' to characters to remove from end of file path

* Tested initial migration to LangChain, removed comments and logging used for debugging

* Converted camelCase to snake_case

* Turns out we need the exception handling

* Testing Hugging Face Integrations via LangChain

* Added LangChain loadable models

* Renamed the "qa" prompt to "clarify", since it is used in the "clarify" step to ask for clarification

* Fixed loading model yaml files

* Fixed streaming

* Added modeldir cli option

* Fixed typing

* Fixed interaction with token logging

* Fix spelling + dependency issues + typing

* Fix spelling + tests

* Removed unneeded logging which caused test to fail

* Cleaned up code

* Incorporated feedback

- deleted unnecessary functions & logger.info
- used LangChain ChatLLM instead of LLM to naturally communicate with gpt-4 (illustrated in the sketch after the commit details)
- deleted loading model from yaml file, as LC doesn't offer this for ChatModels

* Update gpt_engineer/steps.py

Co-authored-by: Anton Osika <anton.osika@gmail.com>

* Incorporated feedback

- Fixed failing test
- Removed parsing complexity by using # type: ignore
- Replaced every occurrence of ai.last_message_content with its content

* Fixed test

* Update gpt_engineer/steps.py

---------

Co-authored-by: H <holden.robbins@gmail.com>
Co-authored-by: Anton Osika <anton.osika@gmail.com>
2023-07-23 23:30:09 +02:00
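
The chat-model change noted under "Incorporated feedback" swaps a plain LLM wrapper for LangChain's chat interface. The snippet below is a minimal sketch based on LangChain's public API at the time, not gpt-engineer's actual code; the model name, temperature, and prompts are placeholders:

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

# A chat model exchanges role-tagged messages and returns an AIMessage,
# whereas a plain LLM wrapper only takes and returns raw strings.
chat = ChatOpenAI(model="gpt-4", temperature=0.1)

messages = [
    SystemMessage(content="You are a helpful coding assistant."),
    HumanMessage(content="Write a function that reverses a string."),
]

response = chat(messages)
print(response.content)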

52 lines
1.6 KiB
Python

import json
import os

from unittest.mock import MagicMock

import pytest
import rudderstack.analytics as rudder_analytics

from gpt_engineer.collect import collect_learnings, steps_file_hash
from gpt_engineer.db import DB, DBs
from gpt_engineer.learning import extract_learning
from gpt_engineer.steps import gen_code


def test_collect_learnings(monkeypatch):
    monkeypatch.setattr(os, "environ", {"COLLECT_LEARNINGS_OPT_IN": "true"})
    monkeypatch.setattr(rudder_analytics, "track", MagicMock())

    model = "test_model"
    temperature = 0.5
    steps = [gen_code]
    dbs = DBs(DB("/tmp"), DB("/tmp"), DB("/tmp"), DB("/tmp"), DB("/tmp"), DB("/tmp"))
    dbs.input = {
        "prompt": "test prompt\n with newlines",
        "feedback": "test feedback",
    }
    code = "this is output\n\nit contains code"
    dbs.logs = {gen_code.__name__: json.dumps([{"role": "system", "content": code}])}
    dbs.workspace = {"all_output.txt": "test workspace\n" + code}

    collect_learnings(model, temperature, steps, dbs)

    learnings = extract_learning(
        model, temperature, steps, dbs, steps_file_hash=steps_file_hash()
    )
    assert rudder_analytics.track.call_count == 1
    assert rudder_analytics.track.call_args[1]["event"] == "learning"
    a = {
        k: v
        for k, v in rudder_analytics.track.call_args[1]["properties"].items()
        if k != "timestamp"
    }
    b = {k: v for k, v in learnings.to_dict().items() if k != "timestamp"}
    assert a == b

    assert json.dumps(code) in learnings.logs
    assert code in learnings.workspace


if __name__ == "__main__":
    pytest.main(["-v"])
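
The assertions in the test rely on call recording from unittest.mock.MagicMock, which the monkeypatch installs in place of rudder_analytics.track. A standalone sketch of just that pattern (independent of gpt-engineer; the event name and properties are illustrative):

from unittest.mock import MagicMock

track = MagicMock()
track(event="learning", properties={"model": "test_model", "temperature": 0.5})

# call_args[0] holds positional arguments, call_args[1] holds keyword arguments
assert track.call_count == 1
assert track.call_args[1]["event"] == "learning"
assert track.call_args[1]["properties"]["model"] == "test_model"

Because only keyword access on the recorded call is needed, the test can compare the tracked properties against extract_learning's output without ever contacting RudderStack.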