diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 67e845d..cb0ca1f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false matrix: - group: [1, 2] + group: [1, 2, 3, 4] steps: - uses: actions/checkout@v2 - name: Set up Python 3.8 @@ -27,7 +27,7 @@ jobs: - name: Test id: test run: | - pytest -v -s -m "not gpu" --splits 9 --group ${{ matrix.group }} --splitting-algorithm least_duration test/ - timeout-minutes: 20 + pytest -v -s -m "not gpu" --splits 4 --group ${{ matrix.group }} --splitting-algorithm least_duration test/ + timeout-minutes: 10 env: OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} diff --git a/src/apis/gpt.py b/src/apis/gpt.py index 74b0875..c386f6a 100644 --- a/src/apis/gpt.py +++ b/src/apis/gpt.py @@ -32,9 +32,8 @@ If you have updated it already, please restart your terminal. openai.api_key = os.environ['OPENAI_API_KEY'] class GPTSession: - def __init__(self, task_description, test_description, model: str = 'gpt-4', ): + def __init__(self, task_description, model: str = 'gpt-4', ): self.task_description = task_description - self.test_description = test_description if model == 'gpt-4' and self.is_gpt4_available(): self.pricing_prompt = PRICING_GPT4_PROMPT self.pricing_generation = PRICING_GPT4_GENERATION diff --git a/src/cli.py b/src/cli.py index 1a6eeac..4ff16e3 100644 --- a/src/cli.py +++ b/src/cli.py @@ -51,13 +51,11 @@ def main(ctx): @openai_api_key_needed @main.command() @click.option('--description', required=False, help='Description of the microservice.') -@click.option('--test', required=False, help='Test scenario for the microservice.') @click.option('--model', default='gpt-4', help='GPT model to use (default: gpt-4).') @click.option('--verbose', default=False, is_flag=True, help='Verbose mode.') # only for development @path_param def generate( description, - test, model, verbose, path, @@ -71,7 +69,7 @@ def generate( return from src.options.generate.generator 
import Generator - generator = Generator(description, test, path=path, model=model) + generator = Generator(description, path=path, model=model) generator.generate() @openai_api_key_needed diff --git a/src/options/generate/generator.py b/src/options/generate/generator.py index a82f0cb..f4170bb 100644 --- a/src/options/generate/generator.py +++ b/src/options/generate/generator.py @@ -39,7 +39,7 @@ class TaskSpecification: class Generator: def __init__(self, task_description, path, model='gpt-4'): self.gpt_session = gpt.GPTSession(task_description, model=model) - self.microservice_specification = TaskSpecification(task=task_description) + self.microservice_specification = TaskSpecification(task=task_description, test=None) self.microservice_root_path = path def extract_content_from_result(self, plain_text, file_name, match_single_block=False, can_contain_code_block=True): diff --git a/test/.test_durations b/test/.test_durations index 496f234..7a73a41 100644 --- a/test/.test_durations +++ b/test/.test_durations @@ -1,3 +1,2 @@ { - "test/test_generator.py::test_generator": 246.233993379 } \ No newline at end of file diff --git a/test/test_generator.py b/test/test_generator.py index 454aa19..b54d3c3 100644 --- a/test/test_generator.py +++ b/test/test_generator.py @@ -1,11 +1,14 @@ import os + from src.options.generate.generator import Generator -# The cognitive difficulty level is determined by the number of Requirements the microservice has. + +# The cognitive difficulty level is determined by the number of requirements the microservice has. 
def test_generation_level_0(tmpdir): """ Requirements: + coding: ❌ pip packages: ❌ environment: ❌ GPT-3.5-turbo: ❌ @@ -13,18 +16,66 @@ def test_generation_level_0(tmpdir): Databases: ❌ """ os.environ['VERBOSE'] = 'true' - generator = Generator("The microservice is very simple, it does not take anything as input and only outputs the word 'test'", str(tmpdir) + 'microservice', 'gpt-3.5-turbo') + generator = Generator( + "The microservice is very simple, it does not take anything as input and only outputs the word 'test'", + str(tmpdir) + 'microservice', + 'gpt-3.5-turbo' + ) generator.generate() + def test_generation_level_1(tmpdir): """ Requirements: - pip packages: ✅ + coding: ❌ + pip packages: ✅ (pdf parser) environment: ❌ GPT-3.5-turbo: ❌ APIs: ❌ Databases: ❌ """ os.environ['VERBOSE'] = 'true' - generator = Generator("The input is a PDF like https://www.africau.edu/images/default/sample.pdf and the output the parsed text", str(tmpdir) + 'microservice', 'gpt-3.5-turbo') - generator.generate() \ No newline at end of file + generator = Generator( + "The input is a PDF like https://www.africau.edu/images/default/sample.pdf and the output the parsed text", + str(tmpdir) + 'microservice', + 'gpt-3.5-turbo' + ) + generator.generate() + + +def test_generation_level_4(tmpdir): + """ + Requirements: + coding: ✅ (putting text on the image) + pip packages: ✅ (Pillow for image processing) + environment: ❌ + GPT-3.5-turbo: ✅ (for writing the joke) + APIs: ✅ (scenex for image description) + Databases: ❌ + """ + os.environ['VERBOSE'] = 'true' + generator = Generator(f''' +The input is an image like this: https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/560px-PNG_transparency_demonstration_1.png. 
+Use the following api to get the description of the image: +Request: +curl "https://us-central1-causal-diffusion.cloudfunctions.net/describe" \ -H "x-api-key: token {os.environ['SCENEX_API_KEY']}" \ -H "content-type: application/json" \ --data '{{"data":[ {{"image": "", "features": []}} ]}}' +Result format: +{{ + "result": [ + {{ + "text": "" + }} + ] +}} +The description is then used to generate a joke. +The joke is then put on the image. +The output is the image with the joke on it.''', + str(tmpdir) + 'microservice', + 'gpt-3.5-turbo' + ) + generator.generate()