diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 67e845d..c62554b 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        group: [1, 2]
+        group: [1, 2, 3, 4, 5]
     steps:
       - uses: actions/checkout@v2
      - name: Set up Python 3.8
@@ -27,7 +27,9 @@ jobs:
       - name: Test
         id: test
         run: |
-          pytest -v -s -m "not gpu" --splits 9 --group ${{ matrix.group }} --splitting-algorithm least_duration test/
-        timeout-minutes: 20
+          pytest -v -s -m "not gpu" --splits 5 --group ${{ matrix.group }} --splitting-algorithm least_duration test/
+        timeout-minutes: 10
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+          SCENEX_API_KEY: ${{ secrets.SCENEX_API_KEY }}
+          WHISPER_API_KEY: ${{ secrets.WHISPER_API_KEY }}
diff --git a/.test_durations b/.test_durations
new file mode 100644
index 0000000..9ee8723
--- /dev/null
+++ b/.test_durations
@@ -0,0 +1,9 @@
+{
+  "test/test_generator.py::test_generation_level_0": 100,
+  "test/test_generator.py::test_generation_level_1": 100,
+  "test/test_generator.py::test_generation_level_2": 100,
+  "test/test_generator.py::test_generation_level_3": 100,
+  "test/test_generator.py::test_generation_level_4": 100,
+  "test/test_hub.py::test_is_microservice_in_hub": 1,
+  "test/test_strings.py::test_clean_color_codes": 1
+}
\ No newline at end of file
diff --git a/src/apis/gpt.py b/src/apis/gpt.py
index 74b0875..c386f6a 100644
--- a/src/apis/gpt.py
+++ b/src/apis/gpt.py
@@ -32,9 +32,8 @@ If you have updated it already, please restart your terminal.
 openai.api_key = os.environ['OPENAI_API_KEY']
 
 class GPTSession:
-    def __init__(self, task_description, test_description, model: str = 'gpt-4', ):
+    def __init__(self, task_description, model: str = 'gpt-4', ):
         self.task_description = task_description
-        self.test_description = test_description
         if model == 'gpt-4' and self.is_gpt4_available():
             self.pricing_prompt = PRICING_GPT4_PROMPT
             self.pricing_generation = PRICING_GPT4_GENERATION
diff --git a/src/cli.py b/src/cli.py
index 1a6eeac..4ff16e3 100644
--- a/src/cli.py
+++ b/src/cli.py
@@ -51,13 +51,11 @@ def main(ctx):
 @openai_api_key_needed
 @main.command()
 @click.option('--description', required=False, help='Description of the microservice.')
-@click.option('--test', required=False, help='Test scenario for the microservice.')
 @click.option('--model', default='gpt-4', help='GPT model to use (default: gpt-4).')
 @click.option('--verbose', default=False, is_flag=True, help='Verbose mode.')  # only for development
 @path_param
 def generate(
         description,
-        test,
         model,
         verbose,
         path,
@@ -71,7 +69,7 @@ def generate(
         return
 
     from src.options.generate.generator import Generator
-    generator = Generator(description, test, path=path, model=model)
+    generator = Generator(description, path=path, model=model)
     generator.generate()
 
 @openai_api_key_needed
diff --git a/src/options/generate/generator.py b/src/options/generate/generator.py
index f9c824a..f4170bb 100644
--- a/src/options/generate/generator.py
+++ b/src/options/generate/generator.py
@@ -37,9 +37,9 @@ class TaskSpecification:
     test: Optional[Text]
 
 class Generator:
-    def __init__(self, task_description, test_description, path, model='gpt-4'):
-        self.gpt_session = gpt.GPTSession(task_description, test_description, model=model)
-        self.microservice_specification = TaskSpecification(task=task_description, test=test_description)
+    def __init__(self, task_description, path, model='gpt-4'):
+        self.gpt_session = gpt.GPTSession(task_description, model=model)
+        self.microservice_specification = TaskSpecification(task=task_description, test=None)
         self.microservice_root_path = path
 
     def extract_content_from_result(self, plain_text, file_name, match_single_block=False, can_contain_code_block=True):
diff --git a/src/options/generate/templates_user.py b/src/options/generate/templates_user.py
index c878d43..9e078c3 100644
--- a/src/options/generate/templates_user.py
+++ b/src/options/generate/templates_user.py
@@ -372,5 +372,6 @@ Or write the summarized microservice{_optional_test} description like this:
 Note that your response must be either prompt.txt or final.txt. You must not write both.
 Note that you must obey the double asterisk and tripple backtick syntax from above.
 Note that prompt.txt must not only contain one question.
+Note that if urls, secrets, database names, etc. are mentioned, they must be part of the summary.
 '''
 )
diff --git a/test/.test_durations b/test/.test_durations
deleted file mode 100644
index 496f234..0000000
--- a/test/.test_durations
+++ /dev/null
@@ -1,3 +0,0 @@
-{
-  "test/test_generator.py::test_generator": 246.233993379
-}
\ No newline at end of file
diff --git a/test/test_generator.py b/test/test_generator.py
index 9ad2276..098dc18 100644
--- a/test/test_generator.py
+++ b/test/test_generator.py
@@ -1,7 +1,144 @@
 import os
+
+import pytest
+
 from src.options.generate.generator import Generator
 
-def test_generator(tmpdir):
+
+# The cognitive difficulty level is determined by the number of requirements the microservice has.
+
+def test_generation_level_0(tmpdir):
+    """
+    Requirements:
+    coding challenge: ❌
+    pip packages: ❌
+    environment: ❌
+    GPT-3.5-turbo: ❌
+    APIs: ❌
+    Databases: ❌
+    """
     os.environ['VERBOSE'] = 'true'
-    generator = Generator("The microservice is very simple, it does not take anything as input and only outputs the word 'test'", "my test", str(tmpdir) + 'microservice', 'gpt-3.5-turbo')
+    generator = Generator(
+        "The microservice is very simple, it does not take anything as input and only outputs the word 'test'",
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
+    generator.generate()
+
+@pytest.mark.skip(reason="not possible")
+def test_generation_level_1(tmpdir):
+    """
+    Requirements:
+    coding challenge: ❌
+    pip packages: ❌
+    environment: ❌
+    GPT-3.5-turbo: ✅ (for summarizing the text)
+    APIs: ❌
+    Databases: ❌
+    """
+    os.environ['VERBOSE'] = 'true'
+    generator = Generator(
+        '''
+Input is a tweet that might contain passive aggressive language like:
+'When your coworker microwaves fish in the break room... AGAIN. 🐟🤢 But hey, at least SOMEONE's enjoying their lunch. #officelife'
+The output is a tweet that is not passive aggressive like:
+'Hi coworker,
+I hope you're having an amazing day!
+Just a quick note: sometimes microwaving fish can create an interesting aroma in the break room.
+If you're up for trying different lunch options, that could be a fun way to mix things up.
+Enjoy your day!
+#variety'
+''',
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
+    generator.generate()
+
+@pytest.mark.skip(reason="not possible")
+def test_generation_level_2(tmpdir):
+    """
+    Requirements:
+    coding challenge: ❌
+    pip packages: ✅ (pdf parser)
+    environment: ❌
+    GPT-3.5-turbo: ✅ (for summarizing the text)
+    APIs: ❌
+    Databases: ❌
+    """
+    os.environ['VERBOSE'] = 'true'
+    generator = Generator(
+        "The input is a PDF like https://www.africau.edu/images/default/sample.pdf and the output is the summarized text.",
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
+    generator.generate()
+
+@pytest.mark.skip(reason="not possible")
+def test_generation_level_3(tmpdir):
+    """
+    Requirements:
+    coding challenge: ❌
+    pip packages: ✅ (text to speech)
+    environment: ❌
+    GPT-3.5-turbo: ✅ (summarizing the text)
+    APIs: ✅ (whisper for speech to text)
+    Databases: ❌
+    """
+    os.environ['VERBOSE'] = 'true'
+    generator = Generator(
+        f'''Given an audio file of speech like https://www.signalogic.com/melp/EngSamples/Orig/ENG_M.wav,
+convert it to text using the following api:
+import requests
+url = "https://transcribe.whisperapi.com"
+headers = {{
+'Authorization': 'Bearer {os.environ['WHISPER_API_KEY']}'
+}}
+data = {{
+  "url": "URL_OF_STORED_AUDIO_FILE"
+}}
+response = requests.post(url, headers=headers, files=file, data=data)
+print(response.text)
+Summarize the text.
+Create an audio file of the summarized text.
+''',
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
+    generator.generate()
+
+@pytest.mark.skip(reason="not possible")
+def test_generation_level_4(tmpdir):
+    """
+    Requirements:
+    coding challenge: ✅ (putting text on the image)
+    pip packages: ✅ (Pillow for image processing)
+    environment: ❌
+    GPT-3.5-turbo: ✅ (for writing the joke)
+    APIs: ✅ (scenex for image description)
+    Databases: ❌
+    """
+    os.environ['VERBOSE'] = 'true'
+    generator = Generator(f'''
+The input is an image like this: https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/560px-PNG_transparency_demonstration_1.png.
+Use the following api to get the description of the image:
+Request:
+curl "https://us-central1-causal-diffusion.cloudfunctions.net/describe" \\
+  -H "x-api-key: token {os.environ['SCENEX_API_KEY']}" \\
+  -H "content-type: application/json" \\
+  --data '{{"data":[
+    {{"image": "", "features": []}}
+  ]}}'
+Result format:
+{{
+  "result": [
+    {{
+      "text": ""
+    }}
+  ]
+}}
+The description is then used to generate a joke.
+The joke is then put on the image.
+The output is the image with the joke on it.''',
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
     generator.generate()
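
A note on the level-3 test prompt: the embedded whisperapi snippet passes files=file without defining file, and only a remote URL is available in this scenario anyway. The following is a minimal, self-contained sketch of the call the prompt appears to intend, assuming the service accepts a bearer token and a "url" form field as described there; it has not been verified against the real API.

import os

import requests

# Hypothetical sketch of the transcription request referenced in test_generation_level_3.
# Assumes https://transcribe.whisperapi.com accepts a bearer token plus a 'url' form field,
# as the test prompt describes; the dangling 'files' argument from the prompt is dropped
# because no local file is uploaded here.
url = "https://transcribe.whisperapi.com"
headers = {"Authorization": f"Bearer {os.environ['WHISPER_API_KEY']}"}
data = {"url": "https://www.signalogic.com/melp/EngSamples/Orig/ENG_M.wav"}

response = requests.post(url, headers=headers, data=data)
print(response.text)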