🧪 test: level 4

Florian Hönicke
2023-04-28 23:58:35 +02:00
parent aab91a4077
commit 3dd5daf2bf
6 changed files with 62 additions and 15 deletions

@@ -10,7 +10,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        group: [1, 2]
+        group: [1, 2, 3, 4]
     steps:
       - uses: actions/checkout@v2
       - name: Set up Python 3.8
@@ -27,7 +27,7 @@ jobs:
       - name: Test
         id: test
         run: |
-          pytest -v -s -m "not gpu" --splits 9 --group ${{ matrix.group }} --splitting-algorithm least_duration test/
-        timeout-minutes: 20
+          pytest -v -s -m "not gpu" --splits 4 --group ${{ matrix.group }} --splitting-algorithm least_duration test/
+        timeout-minutes: 10
         env:
           OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
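
The matrix now fans the suite out over four groups, and `--splits 4 --group N` with `--splitting-algorithm least_duration` lets pytest-split balance those groups by recorded runtimes instead of by test count. As a rough sketch of the idea (a greedy illustration, not pytest-split's actual implementation), a least-duration split assigns the longest tests first, each to the group with the smallest accumulated runtime:

```python
def split_least_duration(durations, num_groups):
    """Greedy sketch: longest tests first, each into the currently lightest group."""
    groups = [[] for _ in range(num_groups)]
    totals = [0.0] * num_groups
    for test, seconds in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
        lightest = totals.index(min(totals))
        groups[lightest].append(test)
        totals[lightest] += seconds
    return groups


# Example with the kind of entries the stored durations file holds (values made up):
durations = {
    "test/test_generator.py::test_generation_level_0": 120.0,
    "test/test_generator.py::test_generation_level_1": 180.0,
    "test/test_generator.py::test_generation_level_4": 240.0,
}
print(split_least_duration(durations, num_groups=4))
```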

@@ -32,9 +32,8 @@ If you have updated it already, please restart your terminal.
 openai.api_key = os.environ['OPENAI_API_KEY']
 
 class GPTSession:
-    def __init__(self, task_description, test_description, model: str = 'gpt-4', ):
+    def __init__(self, task_description, model: str = 'gpt-4', ):
         self.task_description = task_description
-        self.test_description = test_description
         if model == 'gpt-4' and self.is_gpt4_available():
             self.pricing_prompt = PRICING_GPT4_PROMPT
             self.pricing_generation = PRICING_GPT4_GENERATION

@@ -51,13 +51,11 @@ def main(ctx):
 @openai_api_key_needed
 @main.command()
 @click.option('--description', required=False, help='Description of the microservice.')
-@click.option('--test', required=False, help='Test scenario for the microservice.')
 @click.option('--model', default='gpt-4', help='GPT model to use (default: gpt-4).')
 @click.option('--verbose', default=False, is_flag=True, help='Verbose mode.')  # only for development
 @path_param
 def generate(
         description,
-        test,
         model,
         verbose,
         path,
@@ -71,7 +69,7 @@ def generate(
         return
     from src.options.generate.generator import Generator
-    generator = Generator(description, test, path=path, model=model)
+    generator = Generator(description, path=path, model=model)
     generator.generate()
 
 @openai_api_key_needed
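
With the `--test` option gone, the command is driven by the description alone. Below is a hedged sketch of exercising the slimmed-down command through click's test runner; the import path of the CLI module and whatever option `@path_param` contributes are assumptions, since neither appears in this diff:

```python
from click.testing import CliRunner

from src.cli import main  # assumed location of the click group defined above

runner = CliRunner()
result = runner.invoke(main, [
    'generate',
    '--description', "The input is a PDF and the output is the parsed text",
    '--model', 'gpt-3.5-turbo',
])
print(result.exit_code, result.output)
```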

@@ -39,7 +39,7 @@ class TaskSpecification:
 class Generator:
     def __init__(self, task_description, path, model='gpt-4'):
         self.gpt_session = gpt.GPTSession(task_description, model=model)
-        self.microservice_specification = TaskSpecification(task=task_description)
+        self.microservice_specification = TaskSpecification(task=task_description, test=None)
         self.microservice_root_path = path
 
     def extract_content_from_result(self, plain_text, file_name, match_single_block=False, can_contain_code_block=True):
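
The call site now passes `test=None` explicitly, which implies `TaskSpecification` keeps an optional test field even though callers no longer supply one. A minimal sketch of a shape that would satisfy this call (the real class may carry more fields and defaults):

```python
from dataclasses import dataclass
from typing import Optional


@dataclass
class TaskSpecification:
    task: Optional[str] = None
    test: Optional[str] = None  # still accepted, but the CLI no longer fills it


spec = TaskSpecification(task="Parse a PDF and return its text", test=None)
print(spec)
```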

@@ -1,3 +1,2 @@
 {
-    "test/test_generator.py::test_generator": 246.233993379
 }

@@ -1,11 +1,14 @@
 import os
 from src.options.generate.generator import Generator
 
-# The cognitive difficulty level is determined by the number of Requirements the microservice has.
+# The cognitive difficulty level is determined by the number of requirements the microservice has.
 
 def test_generation_level_0(tmpdir):
     """
     Requirements:
+    coding: ❌
     pip packages: ❌
     environment: ❌
     GPT-3.5-turbo: ❌
@@ -13,18 +16,66 @@ def test_generation_level_0(tmpdir):
     Databases: ❌
     """
     os.environ['VERBOSE'] = 'true'
-    generator = Generator("The microservice is very simple, it does not take anything as input and only outputs the word 'test'", str(tmpdir) + 'microservice', 'gpt-3.5-turbo')
+    generator = Generator(
+        "The microservice is very simple, it does not take anything as input and only outputs the word 'test'",
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
     generator.generate()
 
 def test_generation_level_1(tmpdir):
     """
     Requirements:
-    pip packages:
+    coding:
+    pip packages: ✅ (pdf parser)
     environment: ❌
     GPT-3.5-turbo: ❌
     APIs: ❌
     Databases: ❌
     """
     os.environ['VERBOSE'] = 'true'
-    generator = Generator("The input is a PDF like https://www.africau.edu/images/default/sample.pdf and the output the parsed text", str(tmpdir) + 'microservice', 'gpt-3.5-turbo')
-    generator.generate()
+    generator = Generator(
+        "The input is a PDF like https://www.africau.edu/images/default/sample.pdf and the output the parsed text",
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
+    generator.generate()
+
+
+def test_generation_level_4(tmpdir):
+    """
+    Requirements:
+    coding: ✅ (putting text on the image)
+    pip packages: ✅ (Pillow for image processing)
+    environment: ❌
+    GPT-3.5-turbo: ✅ (for writing the joke)
+    APIs: ✅ (scenex for image description)
+    Databases: ❌
+    """
+    os.environ['VERBOSE'] = 'true'
+    generator = Generator(f'''
+The input is an image like this: https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/560px-PNG_transparency_demonstration_1.png.
+Use the following api to get the description of the image:
+Request:
+curl "https://us-central1-causal-diffusion.cloudfunctions.net/describe" \
+    -H "x-api-key: token {os.environ['SCENEX_API_KEY']}" \
+    -H "content-type: application/json" \
+    --data '{{"data":[
+        {{"image": "<image url here>", "features": []}}
+    ]}}'
+Result format:
+{{
+    "result": [
+        {{
+            "text": "<image description>"
+        }}
+    ]
+}}
+The description is then used to generate a joke.
+The joke is then put on the image.
+The output is the image with the joke on it.''',
+        str(tmpdir) + 'microservice',
+        'gpt-3.5-turbo'
+    )
+    generator.generate()
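
For reference, the SceneX request embedded in the prompt above can be issued directly from Python; this sketch mirrors the endpoint, `x-api-key` header, and payload from the prompt (using `requests` instead of curl) and assumes `SCENEX_API_KEY` is exported in the environment:

```python
import os

import requests


def describe_image(image_url):
    """Ask SceneX for a textual description of the image at `image_url`."""
    response = requests.post(
        "https://us-central1-causal-diffusion.cloudfunctions.net/describe",
        headers={
            "x-api-key": f"token {os.environ['SCENEX_API_KEY']}",
            "content-type": "application/json",
        },
        json={"data": [{"image": image_url, "features": []}]},
        timeout=60,
    )
    response.raise_for_status()
    # Result format: {"result": [{"text": "<image description>"}]}
    return response.json()["result"][0]["text"]


if __name__ == "__main__":
    print(describe_image(
        "https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/"
        "PNG_transparency_demonstration_1.png/560px-PNG_transparency_demonstration_1.png"
    ))
```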