mirror of https://github.com/aljazceru/dev-gpt.git (synced 2025-12-19 06:34:21 +01:00)
🪓 feat: sub task refinement
@@ -1,3 +1,4 @@
+import json
 import os
 from copy import deepcopy
 from time import sleep
@@ -17,7 +18,7 @@ from urllib3.exceptions import InvalidChunkLength
 from dev_gpt.constants import PRICING_GPT4_PROMPT, PRICING_GPT4_GENERATION, PRICING_GPT3_5_TURBO_PROMPT, \
     PRICING_GPT3_5_TURBO_GENERATION, CHARS_PER_TOKEN
 from dev_gpt.options.generate.templates_system import template_system_message_base
-from dev_gpt.utils.string_tools import print_colored
+from dev_gpt.utils.string_tools import print_colored, get_template_parameters


 def configure_openai_api_key():
@@ -32,8 +33,17 @@ If you have updated it already, please restart your terminal.
     openai.api_key = os.environ['OPENAI_API_KEY']

 class GPTSession:
-    def __init__(self, task_description, model: str = 'gpt-4', ):
-        self.task_description = task_description
+    _instance = None
+    _initialized = False
+
+    def __new__(cls, *args, **kwargs):
+        if cls._instance is None:
+            cls._instance = super(GPTSession, cls).__new__(cls)
+        return cls._instance
+
+    def __init__(self, model: str = 'gpt-4', ):
+        if GPTSession._initialized:
+            return
         if model == 'gpt-4' and self.is_gpt4_available():
             self.pricing_prompt = PRICING_GPT4_PROMPT
             self.pricing_generation = PRICING_GPT4_GENERATION
@@ -46,6 +56,7 @@ class GPTSession:
         self.model_name = model
         self.chars_prompt_so_far = 0
         self.chars_generation_so_far = 0
+        GPTSession._initialized = True

     def get_conversation(self, messages: List[BaseMessage] = [], print_stream: bool = True, print_costs: bool = True):
         messages = deepcopy(messages)
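Note: the two hunks above turn GPTSession into a singleton. `__new__` hands back one shared instance and the `_initialized` flag turns later `__init__` calls into no-ops, which is what lets the new module-level `ask_gpt` below construct `GPTSession()` freely. A minimal self-contained sketch of the pattern (class name shortened; not the project's actual class):

```python
class Session:
    _instance = None       # the one shared instance
    _initialized = False   # guards against re-running __init__

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super(Session, cls).__new__(cls)
        return cls._instance

    def __init__(self, model: str = 'gpt-4'):
        if Session._initialized:
            return  # __init__ runs on every Session() call; skip after the first
        self.model = model
        Session._initialized = True

a = Session(model='gpt-4')
b = Session(model='gpt-3.5-turbo')  # ignored: the instance is already initialized
assert a is b and b.model == 'gpt-4'
```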
@@ -151,3 +162,22 @@ class _GPTConversation:
             test_description=test_description,
         )
         return SystemMessage(content=system_message)
+
+
+def ask_gpt(prompt_template, parser, **kwargs):
+    template_parameters = get_template_parameters(prompt_template)
+    if set(template_parameters) != set(kwargs.keys()):
+        raise ValueError(f'Prompt template parameters {get_template_parameters(prompt_template)} do not match '
+                         f'provided parameters {kwargs.keys()}')
+    for key, value in kwargs.items():
+        if isinstance(value, dict):
+            kwargs[key] = json.dumps(value, indent=4)
+    prompt = prompt_template.format(**kwargs)
+    conversation = GPTSession().get_conversation(
+        [],
+        print_stream=os.environ['VERBOSE'].lower() == 'true',
+        print_costs=False
+    )
+    agent_response_raw = conversation.chat(prompt, role='user')
+    agent_response = parser(agent_response_raw)
+    return agent_response
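The new `ask_gpt` helper validates that the caller's kwargs exactly match the template's placeholders and serializes dict values to JSON before formatting. A runnable sketch of both steps, assuming templates are plain `str.format` strings (which the `get_template_parameters` helper added later in this commit relies on):

```python
import json
import string

def template_parameters(template: str) -> list:
    # collect the named placeholders of a str.format-style template
    return [f for _, f, _, _ in string.Formatter().parse(template) if f is not None]

template = 'Schema:\n{schema}\nQuestion:\n{question}'
kwargs = {'schema': {'type': 'number'}, 'question': 'Is the schema lean?'}

# 1) fail fast if the provided parameters do not match the template
if set(template_parameters(template)) != set(kwargs):
    raise ValueError(f'expected {template_parameters(template)}, got {list(kwargs)}')

# 2) inline dict values as pretty-printed JSON, as ask_gpt does
kwargs = {k: json.dumps(v, indent=4) if isinstance(v, dict) else v for k, v in kwargs.items()}
print(template.format(**kwargs))
```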
@@ -35,6 +35,9 @@ def path_param(func):
     def wrapper(*args, **kwargs):
         path = os.path.expanduser(kwargs['path'])
         path = os.path.abspath(path)
+        if os.path.exists(path) and os.listdir(path):
+            click.echo(f"Error: The path {path} you provided via --path is not empty. Please choose a directory that does not exist or is empty.")
+            exit(1)
         kwargs['path'] = path
         return func(*args, **kwargs)
     return wrapper
dev_gpt/options/generate/chains/condition.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+from dev_gpt.apis.gpt import ask_gpt
+from dev_gpt.options.generate.chains.prompt_factory import context_to_string
+from dev_gpt.options.generate.parser import boolean_parser
+
+
+def is_true(question):
+    def fn(context):
+        prompt = question_prompt.format(
+            question=question,
+            context_string=context_to_string(context)
+        )
+        return ask_gpt(prompt, boolean_parser)
+    return fn
+
+def is_false(question):
+    return lambda context: not is_true(question)(context)
+
+question_prompt = '''\
+{context_string}
+{question}
+Note: You must answer "yes" or "no".
+'''
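`is_true`/`is_false` build reusable predicates: each closure formats the yes/no prompt with the current context and runs the answer through `boolean_parser`. A stubbed usage sketch (the `fake_ask` function is a stand-in for the real `ask_gpt`, which would call the API):

```python
def fake_ask(prompt, parser):
    # pretend the model answered; the real ask_gpt sends the prompt to the LLM
    return parser('Yes, it does.')

def is_true(question, ask=fake_ask):
    def fn(context):
        prompt = f'{context}\n{question}\nNote: You must answer "yes" or "no".'
        return ask(prompt, lambda x: 'yes' in x.lower())
    return fn

def is_false(question, ask=fake_ask):
    return lambda context: not is_true(question, ask)(context)

check = is_true('Does the request schema contain an example file url?')
assert check('Request schema:\n{"file_url": "string"}') is True
assert is_false('Does the request schema contain an example file url?')('...') is False
```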
dev_gpt/options/generate/chains/get_user_input_if_neede.py (new file, 21 lines)
@@ -0,0 +1,21 @@
+from dev_gpt.apis.gpt import ask_gpt
+from dev_gpt.options.generate.chains.prompt_factory import context_to_string
+from dev_gpt.options.generate.parser import identity_parser
+
+
+def get_user_input_if_needed(context, conditions, question_gen_prompt_part):
+    if all([c(context) for c in conditions]):
+        return ask_gpt(
+            generate_question_for_file_input_prompt,
+            identity_parser,
+            context_string=context_to_string(context),
+            question_gen_prompt_part=question_gen_prompt_part
+        )
+    return None
+
+generate_question_for_file_input_prompt = '''\
+{context_string}
+
+{question_gen_prompt_part}
+Note: you must only output the question.
+'''
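The helper only generates a question when every condition holds for the context, so a condition list acts as an AND-gate in front of a user prompt. A sketch of the gating logic with plain callables instead of model-backed conditions (the question string is hypothetical):

```python
from typing import Callable, List, Optional

def question_if_needed(context: dict, conditions: List[Callable[[dict], bool]]) -> Optional[str]:
    # mirror of the gate above: all conditions must pass before asking the user
    if all(c(context) for c in conditions):
        return 'Could you provide an example file URL?'  # hypothetical generated question
    return None

ctx = {'Request schema': '{"file_url": "string"}',
       'Microservice description': 'resize an image fetched from a url'}
conditions = [lambda c: 'file_url' in c['Request schema'],
              lambda c: 'http' not in c['Microservice description']]
print(question_if_needed(ctx, conditions))  # asks, because both conditions hold
```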
dev_gpt/options/generate/chains/prompt_factory.py (new file, 17 lines)
@@ -0,0 +1,17 @@
+import json
+
+
+def context_to_string(context):
+    context_strings = []
+    for k, v in context.items():
+        if isinstance(v, dict):
+            v = json.dumps(v, indent=4)
+        v = v.replace('{', '{{').replace('}', '}}')
+        context_strings.append(f'''\
+{k}:
+```
+{v}
+```
+'''
+        )
+    return '\n'.join(context_strings)
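`context_to_string` renders each context entry as a fenced block and doubles the braces so the assembled prompt can still pass through `str.format` later without raising a KeyError. A runnable sketch of the same behavior:

```python
import json

def context_to_string(context: dict) -> str:
    # same shape as the new helper: each entry becomes "<key>:" plus a fenced value
    parts = []
    for k, v in context.items():
        if isinstance(v, dict):
            v = json.dumps(v, indent=4)
        v = v.replace('{', '{{').replace('}', '}}')  # escape for a later .format()
        parts.append(f'{k}:\n```\n{v}\n```\n')
    return '\n'.join(parts)

s = context_to_string({'Request schema': {'type': 'number'}})
assert '{{' in s    # braces doubled in the intermediate string
print(s.format())   # .format() collapses them back to single braces
```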
@@ -7,28 +7,24 @@ from typing import Callable
 from typing import List, Text, Optional

 from langchain import PromptTemplate
-from langchain.schema import SystemMessage, HumanMessage, AIMessage
 from pydantic.dataclasses import dataclass

 from dev_gpt.apis import gpt
 from dev_gpt.apis.gpt import _GPTConversation
 from dev_gpt.apis.jina_cloud import process_error_message, push_executor, is_executor_in_hub
-from dev_gpt.apis.pypi import is_package_on_pypi, get_latest_package_version, clean_requirements_txt
+from dev_gpt.apis.pypi import is_package_on_pypi, clean_requirements_txt
 from dev_gpt.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES, MAX_DEBUGGING_ITERATIONS, \
     BLACKLISTED_PACKAGES, EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
     REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, IMPLEMENTATION_FILE_NAME, \
     IMPLEMENTATION_FILE_TAG, LANGUAGE_PACKAGES, UNNECESSARY_PACKAGES, DOCKER_BASE_IMAGE_VERSION
-from dev_gpt.options.generate.pm import PM
-from dev_gpt.options.generate.templates_system import system_task_iteration, system_task_introduction, system_test_iteration
+from dev_gpt.options.generate.pm.pm import PM
 from dev_gpt.options.generate.templates_user import template_generate_microservice_name, \
     template_generate_possible_packages, \
     template_solve_code_issue, \
     template_solve_pip_dependency_issue, template_is_dependency_issue, template_generate_playground, \
     template_generate_function, template_generate_test, template_generate_requirements, \
     template_chain_of_thought, template_summarize_error, \
-    template_solve_apt_get_dependency_issue, template_pm_task_iteration, \
-    template_pm_test_iteration
-from dev_gpt.options.generate.ui import get_random_employee
+    template_solve_apt_get_dependency_issue
 from dev_gpt.utils.io import persist_file, get_all_microservice_files_with_content, get_microservice_path
 from dev_gpt.utils.string_tools import print_colored

@@ -41,7 +37,7 @@ class TaskSpecification:

 class Generator:
     def __init__(self, task_description, path, model='gpt-4'):
-        self.gpt_session = gpt.GPTSession(task_description, model=model)
+        self.gpt_session = gpt.GPTSession(model=model)
         self.microservice_specification = TaskSpecification(task=task_description, test=None)
         self.microservice_root_path = path

@@ -376,9 +372,6 @@ pytest
     class MaxDebugTimeReachedException(BaseException):
         pass

-    class TaskRefinementException(BaseException):
-        pass
-
     def is_dependency_issue(self, summarized_error, dock_req_string: str, package_manager: str):
         # a few heuristics to quickly jump ahead
         if any([error_message in summarized_error for error_message in
@@ -425,9 +418,10 @@ pytest
         packages_list = self.filter_packages_list(packages_list)
         packages_list = packages_list[:NUM_IMPLEMENTATION_STRATEGIES]
         return packages_list
-
+    # '/private/var/folders/f5/whmffl4d7q79s29jpyb6719m0000gn/T/pytest-of-florianhonicke/pytest-128/test_generation_level_0_mock_i0'
+    # '/private/var/folders/f5/whmffl4d7q79s29jpyb6719m0000gn/T/pytest-of-florianhonicke/pytest-129/test_generation_level_0_mock_i0'
     def generate(self):
-        self.refine_specification()
+        self.microservice_specification.task, self.microservice_specification.test = PM().refine_specification(self.microservice_specification.task)
         os.makedirs(self.microservice_root_path)
         generated_name = self.generate_microservice_name(self.microservice_specification.task)
         microservice_name = f'{generated_name}{random.randint(0, 10_000_000)}'
@@ -458,92 +452,6 @@ dev-gpt deploy --path {self.microservice_root_path}
         error_summary = conversation.chat(template_summarize_error.format(error=error))
         return error_summary

-    def refine_specification(self):
-        pm = get_random_employee('pm')
-        print(f'{pm.emoji}👋 Hi, I\'m {pm.name}, a PM at Jina AI. Gathering the requirements for our engineers.')
-        original_task = self.microservice_specification.task
-        while True:
-            try:
-                self.microservice_specification.test = None
-                if not original_task:
-                    self.microservice_specification.task = self.get_user_input(pm, 'What should your microservice do?')
-
-                self.microservice_specification.task = PM(self.gpt_session).refine(self.microservice_specification.task)
-
-                self.refine_requirements(
-                    pm,
-                    [
-                        SystemMessage(content=system_task_introduction + system_test_iteration),
-                    ],
-                    'test',
-                    '''Note that the test scenario must not contain information that was already mentioned in the microservice description.
-Note that you must not ask for information that were already mentioned before.''',
-                    template_pm_test_iteration,
-                    micro_service_initial_description=f'''Microservice original description:
-```
-{original_task}
-```
-Microservice refined description:
-```
-{self.microservice_specification.task}
-```
-''',
-                )
-                break
-            except self.TaskRefinementException as e:
-
-                print_colored('', f'{pm.emoji} Could not refine your requirements. Please try again...', 'red')
-
-        print(f'''
-{pm.emoji} 👍 Great, I will handover the following requirements to our engineers:
-Description of the microservice:
-{self.microservice_specification.task}
-Test scenario:
-{self.microservice_specification.test}
-''')
-
-    def refine_requirements(self, pm, messages, refinement_type, custom_suffix, template_pm_iteration,
-                            micro_service_initial_description=None):
-        user_input = self.microservice_specification.task
-        num_parsing_tries = 0
-        while True:
-            conversation = self.gpt_session.get_conversation(messages,
-                                                             print_stream=os.environ['VERBOSE'].lower() == 'true',
-                                                             print_costs=False)
-            agent_response_raw = conversation.chat(
-                template_pm_iteration.format(
-                    custom_suffix=custom_suffix,
-                    micro_service_initial_description=micro_service_initial_description if len(messages) == 1 else '',
-                ),
-                role='user'
-            )
-            messages.append(HumanMessage(content=user_input))
-            agent_question = self.extract_content_from_result(agent_response_raw, 'prompt.json',
-                                                              can_contain_code_block=False)
-            final = self.extract_content_from_result(agent_response_raw, 'final.json', can_contain_code_block=False)
-            if final:
-                messages.append(AIMessage(content=final))
-                setattr(self.microservice_specification, refinement_type, final)
-                break
-            elif agent_question:
-                question_parsed = json.loads(agent_question)['question']
-                messages.append(AIMessage(content=question_parsed))
-                user_input = self.get_user_input(pm, question_parsed)
-            else:
-                if num_parsing_tries > 2:
-                    raise self.TaskRefinementException()
-                num_parsing_tries += 1
-                messages.append(AIMessage(content=agent_response_raw))
-                messages.append(
-                    SystemMessage(content='You did not put your answer into the right format using *** and ```.'))
-
-    @staticmethod
-    def get_user_input(employee, prompt_to_user):
-        val = input(f'{employee.emoji}❓ {prompt_to_user}\nyou: ')
-        print()
-        while not val:
-            val = input('you: ')
-        return val

     @staticmethod
     def replace_with_gpt_3_5_turbo_if_possible(pkg):
@@ -572,3 +480,16 @@ Test scenario:
             ] for packages in packages_list
         ]
         return packages_list
+
+    def create_prototype_implementation(self):
+        microservice_py_lines = ['''\
+Class {microservice_name}:''']
+        for sub_task in self.pm.iterate_over_sub_tasks_pydantic(self.sub_task_tree):
+            microservice_py_lines.append(f' {sub_task.python_fn_signature}')
+            microservice_py_lines.append(f' """')
+            microservice_py_lines.append(f' {sub_task.python_fn_docstring}')
+            microservice_py_lines.append(f' """')
+            microservice_py_lines.append(f' raise NotImplementedError')
+        microservice_py_str = '\n'.join(microservice_py_lines)
+        persist_file(os.path.join(self.microservice_root_path, 'microservice.py'), microservice_py_str)
+
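`create_prototype_implementation` above renders the sub-task tree into a stub class with one unimplemented method per sub-task. A standalone sketch of the same rendering, assuming a minimal task object in place of the pydantic TaskTree the real code iterates:

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class SubTask:  # stand-in for a pydantic TaskTree node
    python_fn_signature: str
    python_fn_docstring: str
    sub_tasks: List['SubTask'] = field(default_factory=list)

def render_prototype(sub_tasks: List[SubTask]) -> str:
    lines = ['Class {microservice_name}:']  # template literal, as in the diff
    for t in sub_tasks:
        lines += [f'    {t.python_fn_signature}',
                  '        """',
                  f'        {t.python_fn_docstring}',
                  '        """',
                  '        raise NotImplementedError']
    return '\n'.join(lines)

print(render_prototype([SubTask('def calculate_sum(numbers):',
                                'Calculates the sum of the numbers')]))
```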
dev_gpt/options/generate/parser.py (new file, 15 lines)
@@ -0,0 +1,15 @@
+import json
+import re
+
+
+def identity_parser(x):
+    return x
+
+def boolean_parser(x):
+    return 'yes' in x.lower()
+
+def json_parser(x):
+    if '```' in x:
+        pattern = r'```(.+)```'
+        x = re.findall(pattern, x, re.DOTALL)[-1]
+    return json.loads(x)
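The three parsers normalize raw model output: `boolean_parser` is a substring check for "yes", and `json_parser` strips an optional code fence before `json.loads`. An example of the fence handling (note it assumes a bare fence with no language tag after the backticks):

```python
import json
import re

def json_parser(x: str):
    # take the last fenced block if present, then parse as JSON
    if '```' in x:
        x = re.findall(r'```(.+)```', x, re.DOTALL)[-1]
    return json.loads(x)

raw = 'Here is the result:\n```\n{"task": "sum numbers", "sub_tasks": []}\n```'
assert json_parser(raw)['task'] == 'sum numbers'
assert json_parser('{"ok": true}')['ok'] is True
```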
@@ -1,346 +0,0 @@
-import json
-import os
-import re
-
-from dev_gpt.apis import gpt
-
-
-class PM:
-    def __init__(self, gpt_session):
-        self.gpt_session = gpt_session
-
-    def refine(self, microservice_description):
-        # microservice_description = self.refine_description(microservice_description)
-        sub_task_tree = self.construct_sub_task_tree(microservice_description)
-
-    def construct_sub_task_tree(self, microservice_description):
-        """
-        takes a microservice description an recursively constructs a tree of sub-tasks that need to be done to implement the microservice
-        Example1:
-        Input: "I want to implement a microservice that takes a list of numbers and returns the sum of the numbers"
-        Output:
-        [
-            {
-                "task": "I want to implement a microservice that takes a list of numbers and returns the sum of the numbers",
-                "request_json_schema": {
-                    "type": "array",
-                    "items": {
-                        "type": "number"
-                    }
-                },
-                "response_json_schema": {
-                    "type": "number"
-                },
-                "sub_tasks": [
-                    {
-                        "task": "Calculate the sum of the numbers",
-                        "python_fn_signature": "def calculate_sum(numbers: List[float]) -> float:",
-                        "python_fn_docstring": "Calculates the sum of the numbers\n\nArgs:\n    numbers: a list of numbers\n\nReturns:\n    the sum of the numbers",",
-                        "sub_tasks": []
-                    },
-                ]
-            }
-        ]
-        Example2: "Input is a list of emails. For all the companies from the emails belonging to, it gets the company's logo. All logos are arranged in a collage and returned."
-        [
-            {
-                "task": "Extract company domains from the list of emails",
-                "sub_tasks": []
-            },
-            {
-                "task": "Retrieve company logos for the extracted domains",
-                "sub_tasks": [
-                    {
-                        "task": "Find logo URL for each company domain",
-                        "sub_tasks": []
-                    },
-                    {
-                        "task": "Download company logos from the URLs",
-                        "sub_tasks": []
-                    }
-                ]
-            },
-            {
-                "task": "Create a collage of company logos",
-                "sub_tasks": [
-                    {
-                        "task": "Determine collage layout and dimensions",
-                        "sub_tasks": []
-                    },
-                    {
-                        "task": "Position and resize logos in the collage",
-                        "sub_tasks": []
-                    },
-                    {
-                        "task": "Combine logos into a single image",
-                        "sub_tasks": []
-                    }
-                ]
-            },
-            {
-                "task": "Return the collage of company logos",
-                "sub_tasks": []
-            }
-        ]
-        """
-        microservice_description = self.refine_description(microservice_description)
-        sub_task_tree = self.ask_gpt(construct_sub_task_tree_prompt, json_parser,
-                                     microservice_description=microservice_description)
-        # reflections = self.ask_gpt(sub_task_tree_reflections_prompt, identity_parser, microservice_description=microservice_description, sub_task_tree=sub_task_tree)
-        # solutions = self.ask_gpt(sub_task_tree_solutions_prompt, identity_parser, microservice_description=microservice_description, sub_task_tree=sub_task_tree, reflections=reflections)
-        # sub_task_tree_updated = self.ask_gpt(sub_task_tree_update_prompt, json_parser, microservice_description=microservice_description, sub_task_tree=sub_task_tree, solutions=solutions)
-        # return sub_task_tree_updated
-        return sub_task_tree
-
-
-    def refine_description(self, microservice_description):
-        microservice_description = self.ask_gpt(better_description_prompt, identity_parser, microservice_description=microservice_description)
-        request_schema = self.ask_gpt(generate_request_schema_prompt, identity_parser,
-                                      microservice_description=microservice_description)
-        response_schema = self.ask_gpt(generate_output_schema_prompt, identity_parser,
-                                       microservice_description=microservice_description, request_schema=request_schema)
-        # additional_specifications = self.add_additional_specifications(microservice_description, request_schema,
-        #                                                                response_schema)
-        microservice_description = self.ask_gpt(summarize_description_and_schemas_prompt, identity_parser,
-                                                microservice_description=microservice_description,
-                                                request_schema=request_schema,
-                                                response_schema=response_schema,
-                                                # additional_specifications=additional_specifications
-                                                )
-
-        while (user_feedback := self.get_user_feedback(microservice_description)):
-            microservice_description = self.ask_gpt(add_feedback_prompt, identity_parser,
-                                                    microservice_description=microservice_description,
-                                                    user_feedback=user_feedback)
-        return microservice_description
-
-    def add_additional_specifications(self, microservice_description, request_schema, response_schema):
-        questions = self.ask_gpt(
-            ask_questions_prompt, identity_parser,
-            microservice_description=microservice_description,
-            request_schema=request_schema, response_schema=response_schema)
-        additional_specifications = self.ask_gpt(
-            answer_questions_prompt,
-            identity_parser,
-            microservice_description=microservice_description,
-            request_schema=request_schema,
-            response_schema=response_schema,
-            questions=questions
-        )
-        return additional_specifications
-
-    def get_user_feedback(self, microservice_description):
-        while True:
-            user_feedback = input(
-                f'I suggest that we implement the following microservice:\n{microservice_description}\nDo you agree? [y/n]')
-            if user_feedback.lower() in ['y', 'yes', 'yeah', 'yep', 'yup', 'sure', 'ok', 'okay']:
-                print('Great! I will hand this over to the developers!')
-                return None
-            elif user_feedback.lower() in ['n', 'no', 'nope', 'nah', 'nay', 'not']:
-                return input('What do you want to change?')
-                # return self.refine_user_feedback(microservice_description)
-
-    # Prompting
-    def ask_gpt(self, prompt_template, parser, **kwargs):
-        prompt = prompt_template.format(**kwargs)
-        conversation = self.gpt_session.get_conversation(
-            [],
-            print_stream=os.environ['VERBOSE'].lower() == 'true',
-            print_costs=False
-        )
-        agent_response_raw = conversation.chat(prompt, role='user')
-        agent_response = parser(agent_response_raw)
-        return agent_response
-
-    # def refine_user_feedback(self, microservice_description):
-    #     while True:
-    #         user_feedback = input('What do you want to change?')
-    #         if self.ask_gpt(is_feedback_valuable_prompt, boolean_parser, user_feedback=user_feedback,
-    #                         microservice_description=microservice_description):
-    #             return user_feedback
-    #         else:
-    #             print('Sorry, I can not handle this feedback. Please formulate it more precisely.')
-
-
-def identity_parser(x):
-    return x
-
-
-def boolean_parser(x):
-    return 'yes' in x.lower()
-
-
-def json_parser(x):
-    if '```' in x:
-        pattern = r'```(.+)```'
-        x = re.findall(pattern, x, re.DOTALL)[-1]
-    return json.loads(x)
-
-
-client_description = '''\
-Microservice description:
-```
-{microservice_description}
-```'''
-
-better_description_prompt = client_description + '''
-Update the description of the Microservice to make it more precise without adding or removing information.
-Note: the output must be a list of tasks the Microservice has to perform.
-Example for the description: "return the average temperature of the 5 days weather forecast for a given location."
-1. get the 5 days weather forcast from the https://openweathermap.org/ API
-2. extract the temperature from the response
-3. calculate the average temperature'''
-
-# better_description_prompt = client_description + '''
-# Update the description of the Microservice to make it more precise without adding or removing information.'''
-
-generate_request_schema_prompt = client_description + '''
-Generate the lean request json schema of the Microservice.
-Note: If you are not sure about the details, the come up with the minimal number of parameters possible.'''
-
-generate_output_schema_prompt = client_description + '''
-request json schema:
-```
-{request_schema}
-```
-Generate the lean response json schema for the Microservice.
-Note: If you are not sure about the details, the come up with the minimal number of parameters possible.'''
-
-# If we want to activate this back, then it first needs to work. Currently, it outputs "no" for too many cases.
-# is_feedback_valuable_prompt = client_description + '''
-# User feedback:
-# ```
-# {user_feedback}
-# ```
-# Can this feedback be used to update the microservice description?
-# Note: You must either answer "yes" or "no".
-# Note: If the user does not want to provide feedback, then you must answer "no".'''
-
-
-summarize_description_and_schemas_prompt = client_description + '''
-Request json schema:
-```
-{request_schema}
-```
-Response json schema:
-```
-{response_schema}
-```
-Write an updated microservice description by incorporating information about the request and response parameters in a concise way without losing any information.
-Note: You must not mention any details about algorithms or the technical implementation.
-Note: You must not mention that there is a request and response JSON schema
-Note: You must not use any formatting like triple backticks.'''
-
-add_feedback_prompt = client_description + '''
-User feedback:
-```
-{user_feedback}
-```
-Update the microservice description by incorporating the user feedback in a concise way without losing any information.'''
-
-summarize_description_prompt = client_description + '''
-Make the description more concise without losing any information.
-Note: You must not mention any details about algorithms or the technical implementation.
-Note: You must ignore facts that are not specified.
-Note: You must ignore facts that are not relevant.
-Note: You must ignore facts that are unknown.
-Note: You must ignore facts that are unclear.'''
-
-construct_sub_task_tree_prompt = client_description + '''\
-Recursively constructs a tree of sub-tasks that need to be done to implement the microservice
-Example1:
-Input: "I want to implement a microservice that takes a list of numbers and returns the sum of the numbers"
-Output:
-[
-    {{
-        "task": "I want to implement a microservice that takes a list of numbers and returns the sum of the numbers",
-        "request_json_schema": {{
-            "type": "array",
-            "items": {{
-                "type": "number"
-            }}
-        }},
-        "response_json_schema": {{
-            "type": "number"
-        }},
-        "sub_tasks": [
-            {{
-                "task": "Calculate the sum of the numbers",
-                "python_fn_signature": "def calculate_sum(numbers: List[float]) -> float:",
-                "python_fn_docstring": "Calculates the sum of the numbers\\n\\nArgs:\\n    numbers: a list of numbers\\n\\nReturns:\\n    the sum of the numbers",
-                "sub_tasks": []
-            }}
-        ]
-    }}
-]
-Note: you must only output the json string - nothing else.
-Note: you must pretty print the json string.'''
-
-sub_task_tree_reflections_prompt = client_description + '''\
-Sub task tree:
-```
-{sub_task_tree}
-```
-Reflect on the sub task tree and write up to 10 constructive criticisms (5 words) about it.'''
-
-sub_task_tree_solutions_prompt = client_description + '''\
-Sub task tree:
-```
-{sub_task_tree}
-```
-Reflections:
-```
-{reflections}
-```
-For each constructive criticism, write a solution (5 words) that address the criticism.'''
-
-sub_task_tree_update_prompt = client_description + '''\
-Sub task tree:
-```
-{sub_task_tree}
-```
-Solutions:
-```
-{solutions}
-```
-Update the sub task tree by applying the solutions. (pritty print the json string)'''
-
-
-ask_questions_prompt = client_description + '''
-Request json schema:
-```
-{request_schema}
-```
-Response json schema:
-```
-{response_schema}
-```
-Ask the user up to 5 unique detailed questions (5 words) about the microservice description that are not yet answered.
-'''
-
-answer_questions_prompt = client_description + '''
-Request json schema:
-```
-{request_schema}
-```
-Response json schema:
-```
-{response_schema}
-```
-Questions:
-```
-{questions}
-```
-Answer all questions where you can think of a plausible answer.
-Note: You must not answer questions with something like "...is not specified", "I don't know" or "Unknown".
-'''
-
-if __name__ == '__main__':
-    gpt_session = gpt.GPTSession(None, 'GPT-3.5-turbo')
-    first_question = 'Please specify your microservice.'
-    initial_description = 'mission generator'
-    # initial_description = 'convert png to svg'
-    initial_description = "Input is a list of emails. For all the companies from the emails belonging to, it gets the company's logo. All logos are arranged in a collage and returned."
-    initial_description = "Given an image, write a joke on it that is relevant to the image."
-    PM(gpt_session).refine(initial_description)
-    # PM(gpt_session).construct_sub_task_tree(initial_description)#.refine(initial_description)
dev_gpt/options/generate/pm/__init__.py (new file, empty)
dev_gpt/options/generate/pm/pm.py (new file, 437 lines)
@@ -0,0 +1,437 @@
+import json
+import re
+from typing import Generator
+
+from dev_gpt.apis import gpt
+from dev_gpt.apis.gpt import ask_gpt
+from dev_gpt.options.generate.chains.condition import is_false, is_true
+from dev_gpt.options.generate.chains.get_user_input_if_neede import get_user_input_if_needed
+from dev_gpt.options.generate.parser import identity_parser, boolean_parser, json_parser
+from dev_gpt.options.generate.pm.task_tree_schema import TaskTree
+from dev_gpt.options.generate.ui import get_random_employee
+
+
+class PM:
+    def refine_specification(self, microservice_description) -> TaskTree:
+        pm = get_random_employee('pm')
+        print(f'{pm.emoji}👋 Hi, I\'m {pm.name}, a PM at Jina AI. Gathering the requirements for our engineers.')
+        original_task = microservice_description
+        if not original_task:
+            microservice_description = self.get_user_input(pm, 'What should your microservice do?')
+        microservice_description, test_description = self.refine(microservice_description)
+        print(f'''
+{pm.emoji} 👍 Great, I will handover the following requirements to our engineers:
+Description of the microservice:
+{microservice_description}
+''')
+        return microservice_description, test_description
+
+    @staticmethod
+    def get_user_input(employee, prompt_to_user):
+        val = input(f'{employee.emoji}❓ {prompt_to_user}\nyou: ')
+        print()
+        while not val:
+            val = input('you: ')
+        return val
+
+    def refine(self, microservice_description) -> TaskTree:
+        microservice_description, test_description = self.refine_description(microservice_description)
+        return microservice_description, test_description
+        # sub_task_tree = self.construct_sub_task_tree(microservice_description)
+        # return sub_task_tree
+
+    def get_nlp_fns(self, microservice_description):
+        return ask_gpt(
+            get_nlp_fns_prompt,
+            json_parser,
+            microservice_description=microservice_description
+        )
+
+    def construct_sub_task_tree(self, microservice_description):
+        """
+        takes a microservice description and recursively constructs a tree of sub-tasks that need to be done to implement the microservice
+        """
+        #
+        # nlp_fns = self.get_nlp_fns(
+        #     microservice_description
+        # )
+
+        sub_task_tree_dict = ask_gpt(
+            construct_sub_task_tree_prompt, json_parser,
+            microservice_description=microservice_description,
+            # nlp_fns=nlp_fns
+        )
+        reflections = ask_gpt(
+            sub_task_tree_reflections_prompt, identity_parser,
+            microservice_description=microservice_description,
+            # nlp_fns=nlp_fns,
+            sub_task_tree=sub_task_tree_dict,
+        )
+        solutions = ask_gpt(
+            sub_task_tree_solutions_prompt, identity_parser,
+            # nlp_fns=nlp_fns,
+            microservice_description=microservice_description, sub_task_tree=sub_task_tree_dict,
+            reflections=reflections,
+        )
+        sub_task_tree_updated = ask_gpt(
+            sub_task_tree_update_prompt,
+            json_parser,
+            microservice_description=microservice_description,
+            # nlp_fns=nlp_fns,
+            sub_task_tree=sub_task_tree_dict, solutions=solutions
+        )
+        # for task_dict in self.iterate_over_sub_tasks(sub_task_tree_updated):
+        #     task_dict.update(self.get_additional_task_info(task_dict['task']))
+
+        sub_task_tree = TaskTree.parse_obj(sub_task_tree_updated)
+        return sub_task_tree
+
+    def get_additional_task_info(self, sub_task_description):
+        additional_info_dict = self.get_additional_infos(
+            description=sub_task_description,
+            parameter={
+                'display_name': 'Task description',
+                'text': sub_task_description,
+            },
+            potentially_required_information_list=[
+                {
+                    'field_name': 'api_key',
+                    'display_name': 'valid API key',
+                }, {
+                    'field_name': 'database_access',
+                    'display_name': 'database access',
+                }, {
+                    'field_name': 'documentation',
+                    'display_name': 'documentation',
+                }, {
+                    'field_name': 'example_api_call',
+                    'display_name': 'curl command or sample code for api call',
+                },
+            ],
+
+        )
+        return additional_info_dict
+
+    def get_additional_infos(self, description, parameter, potentially_required_information_list):
+        additional_info_dict = {}
+        for potentially_required_information in potentially_required_information_list:
+            is_task_requiring_information = ask_gpt(
+                is_task_requiring_information_template,
+                boolean_parser,
+                description=description,
+                description_title=parameter['display_name'],
+                description_text=parameter['text'],
+                potentially_required_information=potentially_required_information
+            )
+            if is_task_requiring_information:
+                generated_question = ask_gpt(
+                    generate_question_for_required_information_template,
+                    identity_parser,
+                    description=description,
+                    description_title=parameter['display_name'],
+                    description_text=parameter['text'],
+                    potentially_required_information=potentially_required_information
+                )
+                user_answer = input(generated_question)
+                additional_info_dict[potentially_required_information] = user_answer
+        return additional_info_dict
+
+    def iterate_over_sub_tasks(self, sub_task_tree_updated):
+        sub_tasks = sub_task_tree_updated['sub_tasks'] if 'sub_tasks' in sub_task_tree_updated else []
+        for sub_task in sub_tasks:
+            yield sub_task
+            yield from self.iterate_over_sub_tasks(sub_task)
+
+    def iterate_over_sub_tasks_pydantic(self, sub_task_tree: TaskTree) -> Generator[TaskTree, None, None]:
+        sub_tasks = sub_task_tree.sub_fns
+        for sub_task in sub_tasks:
+            yield sub_task
+            yield from self.iterate_over_sub_tasks_pydantic(sub_task)
+
+    def refine_description(self, microservice_description):
+        microservice_description = ask_gpt(better_description_prompt, identity_parser,
+                                           microservice_description=microservice_description)
+        request_schema = ask_gpt(generate_request_schema_prompt, identity_parser,
+                                 microservice_description=microservice_description)
+        response_schema = ask_gpt(generate_output_schema_prompt, identity_parser,
+                                  microservice_description=microservice_description, request_schema=request_schema)
+        # additional_specifications = self.add_additional_specifications(microservice_description, request_schema,
+        #                                                                response_schema)
+        microservice_description = ask_gpt(summarize_description_and_schemas_prompt, identity_parser,
+                                           microservice_description=microservice_description,
+                                           request_schema=request_schema,
+                                           response_schema=response_schema,
+                                           # additional_specifications=additional_specifications
+                                           )
+
+        while (user_feedback := self.get_user_feedback(microservice_description)):
+            microservice_description = ask_gpt(add_feedback_prompt, identity_parser,
+                                               microservice_description=microservice_description,
+                                               user_feedback=user_feedback)
+        test_description = ask_gpt(
+            generate_test_description_prompt,
+            identity_parser,
+            microservice_description=microservice_description,
+            request_schema=request_schema,
+            response_schema=response_schema
+        )
+        example_file_url = get_user_input_if_needed(
+            context={
+                'Microservice description': microservice_description,
+                'Request schema': request_schema,
+                'Response schema': response_schema,
+            },
+            conditions=[
+                is_true('Does request schema contain an example file url?'),
+                is_false('Is input url specified in the description?'),
+            ],
+            question_gen_prompt_part="Generate a question that asks for an example file url.",
+        )
+        if example_file_url:
+            test_description += f'\nInput Example: {example_file_url}'
+
+        return microservice_description, test_description
+
+    def add_additional_specifications(self, microservice_description, request_schema, response_schema):
+        questions = ask_gpt(
+            ask_questions_prompt, identity_parser,
+            microservice_description=microservice_description,
+            request_schema=request_schema, response_schema=response_schema)
+        additional_specifications = ask_gpt(
+            answer_questions_prompt,
+            identity_parser,
+            microservice_description=microservice_description,
+            request_schema=request_schema,
+            response_schema=response_schema,
+            questions=questions
+        )
+        return additional_specifications
+
+    def get_user_feedback(self, microservice_description):
+        while True:
+            user_feedback = input(
+                f'I suggest that we implement the following microservice:\n{microservice_description}\nDo you agree? [y/n]')
+            if user_feedback.lower() in ['y', 'yes', 'yeah', 'yep', 'yup', 'sure', 'ok', 'okay']:
+                print('Great! I will hand this over to the developers!')
+                return None
+            elif user_feedback.lower() in ['n', 'no', 'nope', 'nah', 'nay', 'not']:
+                return input('What do you want to change?')
+                # return self.refine_user_feedback(microservice_description)
+
+    # def refine_user_feedback(self, microservice_description):
+    #     while True:
+    #         user_feedback = input('What do you want to change?')
+    #         if ask_gpt(is_feedback_valuable_prompt, boolean_parser, user_feedback=user_feedback,
+    #                    microservice_description=microservice_description):
+    #             return user_feedback
+    #         else:
+    #             print('Sorry, I can not handle this feedback. Please formulate it more precisely.')
+
+
+client_description = '''\
+Microservice description:
+```
+{microservice_description}
+```'''
+
+better_description_prompt = client_description + '''
+Update the description of the Microservice to make it more precise without adding or removing information.
+Note: the output must be a list of tasks the Microservice has to perform.
+Example for the description: "return the average temperature of the 5 days weather forecast for a given location."
+1. get the 5 days weather forcast from the https://openweathermap.org/ API
+2. extract the temperature from the response
+3. calculate the average temperature'''
+
+# better_description_prompt = client_description + '''
+# Update the description of the Microservice to make it more precise without adding or removing information.'''
+
+generate_request_schema_prompt = client_description + '''
+Generate the lean request json schema of the Microservice.
+Note: If you are not sure about the details, the come up with the minimal number of parameters possible.'''
+
+generate_output_schema_prompt = client_description + '''
+request json schema:
+```
+{request_schema}
+```
+Generate the lean response json schema for the Microservice.
+Note: If you are not sure about the details, the come up with the minimal number of parameters possible.'''
+
+# If we want to activate this back, then it first needs to work. Currently, it outputs "no" for too many cases.
+# is_feedback_valuable_prompt = client_description + '''
+# User feedback:
+# ```
+# {user_feedback}
+# ```
+# Can this feedback be used to update the microservice description?
+# Note: You must either answer "yes" or "no".
+# Note: If the user does not want to provide feedback, then you must answer "no".'''
+
+
+summarize_description_and_schemas_prompt = client_description + '''
+Request json schema:
+```
+{request_schema}
+```
+Response json schema:
+```
+{response_schema}
+```
+Write an updated microservice description by incorporating information about the request and response parameters in a concise way without losing any information.
+Note: You must not mention any details about algorithms or the technical implementation.
+Note: You must not mention that there is a request and response JSON schema
+Note: You must not use any formatting like triple backticks.'''
+
+add_feedback_prompt = client_description + '''
+User feedback:
+```
+{user_feedback}
+```
+Update the microservice description by incorporating the user feedback in a concise way without losing any information.'''
+
+summarize_description_prompt = client_description + '''
+Make the description more concise without losing any information.
+Note: You must not mention any details about algorithms or the technical implementation.
+Note: You must ignore facts that are not specified.
+Note: You must ignore facts that are not relevant.
+Note: You must ignore facts that are unknown.
+Note: You must ignore facts that are unclear.'''
+
+construct_sub_task_tree_prompt = client_description + '''
+Recursively constructs a tree of functions that need to be implemented for the endpoint_function that retrieves a json string and returns a json string.
+Example:
+Input: "Input: list of integers, Output: Audio file of short story where each number is mentioned exactly once."
+Output:
+{{
+    "description": "Create an audio file containing a short story in which each integer from the provided list is seamlessly incorporated, ensuring that every integer is mentioned exactly once.",
+    "python_fn_signature": "def generate_integer_story_audio(numbers: List[int]) -> str:",
+    "sub_fns": [
+        {{
+            "description": "Generate sentence from integer.",
+            "python_fn_signature": "def generate_sentence_from_integer(number: int) -> int:",
+            "sub_fns": []
+        }},
+        {{
+            "description": "Convert the story into an audio file.",
+            "python_fn_signature": "def convert_story_to_audio(story: str) -> bytes:",
+            "sub_fns": []
+        }}
+    ]
+}}
+
+Note: you must only output the json string - nothing else.
+Note: you must pretty print the json string.'''
+
+sub_task_tree_reflections_prompt = client_description + '''
+Sub task tree:
+```
+{sub_task_tree}
+```
+Write down 3 arguments why the sub task tree might not perfectly represents the information mentioned in the microservice description. (5 words per argument)'''
+
+sub_task_tree_solutions_prompt = client_description + '''
+Sub task tree:
+```
+{sub_task_tree}
+```
+Reflections:
+```
+{reflections}
+```
+For each constructive criticism, write a solution (5 words) that address the criticism.'''
+
+sub_task_tree_update_prompt = client_description + '''
+Sub task tree:
+```
+{sub_task_tree}
+```
+Solutions:
+```
+{solutions}
+```
+Update the sub task tree by applying the solutions. (pretty print the json string)'''
+
+ask_questions_prompt = client_description + '''
+Request json schema:
+```
+{request_schema}
+```
+Response json schema:
+```
+{response_schema}
+```
+Ask the user up to 5 unique detailed questions (5 words) about the microservice description that are not yet answered.
+'''
+
+answer_questions_prompt = client_description + '''
+Request json schema:
+```
+{request_schema}
+```
+Response json schema:
+```
+{response_schema}
+```
+Questions:
+```
+{questions}
+```
+Answer all questions where you can think of a plausible answer.
+Note: You must not answer questions with something like "...is not specified", "I don't know" or "Unknown".
+'''
+
+is_task_requiring_information_template = '''\
+{description_title}
+```
+{description_text}
+```
+Does the implementation of the {description_title} require information about "{potentially_required_information}"?
+Note: You must either answer "yes" or "no".'''
+
+generate_question_for_required_information_template = '''\
+{description_title}
+```
+{description_text}
+```
+Generate a question that asks for the information "{potentially_required_information}" regarding "{description_title}".
+Note: you must only output the question - nothing else.'''
+
+get_nlp_fns_prompt = client_description + '''
+Respond with all code parts that could be accomplished by GPT 3.
+Example for "Take a video and/or a pdf as input, extract the subtitles from the video and the text from the pdf, \
+summarize the extracted text and translate it to German":
+```
+[
+    "summarize the text",
+    "translate the text to German"
+]
+```
+Note: only list code parts that could be expressed as a function that takes a string as input and returns a string as output.
+Note: the output must be parsable by the python function json.loads.'''
+
+generate_test_description_prompt = client_description + '''
+Request json schema:
+```
+{request_schema}
+```
+Response json schema:
+```
+{response_schema}
+```
+Generate the description of the test scenario for the microservice.
+Note: you must only output the test description - nothing else.
+Note: you must not use any formatting like triple backticks.
+Note: the test must insert data in defined in the request schema and validate that the type of the response is matching with the response schema.
+'''
+
+if __name__ == '__main__':
+    gpt_session = gpt.GPTSession('GPT-3.5-turbo')
+    first_question = 'Please specify your microservice.'
+    initial_description = 'mission generator'
+    # initial_description = 'convert png to svg'
+    # initial_description = "Input is a list of emails. For all the companies from the emails belonging to, it gets the company's logo. All logos are arranged in a collage and returned."
+    # initial_description = "Given an image, write a joke on it that is relevant to the image."
+    # initial_description = "This microservice receives an image as input and generates a joke based on its content and context. The input must be a binary string of the image. The output is an image with the generated joke overlaid on it."
+    initial_description = 'Build me a serch system for lottiefiles animations'
+    PM().refine(initial_description)
+    # PM(gpt_session).construct_sub_task_tree(initial_description)#.refine(initial_description)
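One detail worth noting in the new `refine_description`: the feedback loop is driven by an assignment expression, revising the description until `get_user_feedback` returns None (the user accepted). A stubbed sketch of that control flow, with the interactive call replaced by a scripted stand-in:

```python
def get_user_feedback_stub(answers):
    # scripted replacement for the interactive get_user_feedback
    it = iter(answers)
    return lambda _description: next(it)

get_feedback = get_user_feedback_stub(['make it faster', None])
description = 'initial description'
while (user_feedback := get_feedback(description)):
    description += f' (revised: {user_feedback})'
print(description)  # -> initial description (revised: make it faster)
```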
dev_gpt/options/generate/pm/task_tree_schema.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+from typing import Dict, List, Union, Optional
+from pydantic import BaseModel, Field
+
+class JSONSchema(BaseModel):
+    type: str
+    format: Union[str, None] = None
+    items: Union['JSONSchema', None] = None
+    properties: Dict[str, 'JSONSchema'] = Field(default_factory=dict)
+    additionalProperties: Union[bool, 'JSONSchema'] = True
+    required: List[str] = Field(default_factory=list)
+
+    class Config:
+        arbitrary_types_allowed = True
+
+class TaskTree(BaseModel):
+    description: Optional[str]
+    python_fn_signature: str
+    sub_fns: List['TaskTree']
+
+JSONSchema.update_forward_refs()
+TaskTree.update_forward_refs()
+
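`TaskTree` is a recursive pydantic v1 model (requirements.txt pins pydantic==1.10.7 in this same commit), so the nested JSON that the sub-task prompt requests can be validated in a single call. A self-contained example:

```python
from typing import List, Optional
from pydantic import BaseModel

class TaskTree(BaseModel):
    description: Optional[str]
    python_fn_signature: str
    sub_fns: List['TaskTree']

TaskTree.update_forward_refs()  # resolve the self-reference (pydantic v1 API)

tree = TaskTree.parse_obj({
    'description': 'Create an audio story from integers.',
    'python_fn_signature': 'def generate_integer_story_audio(numbers): ...',
    'sub_fns': [{'description': 'Generate sentence from integer.',
                 'python_fn_signature': 'def generate_sentence_from_integer(n): ...',
                 'sub_fns': []}],
})
assert tree.sub_fns[0].sub_fns == []
```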
@@ -1,5 +1,6 @@
 import os
 import platform
+import string

 if platform.system() == "Windows":
     os.system("color")
@@ -27,3 +28,15 @@ def print_colored(headline, text, color_code, end='\n'):
|
|||||||
if headline:
|
if headline:
|
||||||
print(f"{bold_start}{color_start}{headline}{reset}")
|
print(f"{bold_start}{color_start}{headline}{reset}")
|
||||||
print(f"{color_start}{text}{reset}", end=end)
|
print(f"{color_start}{text}{reset}", end=end)
|
||||||
|
|
||||||
|
|
||||||
|
def get_template_parameters(formatted_string):
|
||||||
|
formatter = string.Formatter()
|
||||||
|
parsed = formatter.parse(formatted_string)
|
||||||
|
parameters = []
|
||||||
|
|
||||||
|
for literal_text, field_name, format_spec, conversion in parsed:
|
||||||
|
if field_name is not None:
|
||||||
|
parameters.append(field_name)
|
||||||
|
|
||||||
|
return parameters
|
||||||
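get_template_parameters extracts the placeholder names from a str.format-style template, e.g. (illustrative call):

```
>>> get_template_parameters('Request json schema:\n{request_schema}\nResponse json schema:\n{response_schema}')
['request_schema', 'response_schema']
```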

@@ -5,4 +5,5 @@ openai>=0.27.5
 psutil
 jcloud
 jina-hubble-sdk
 langchain==0.0.153
+pydantic==1.10.7
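The explicit pin to pydantic 1.10.7 keeps the codebase on the pydantic v1 API (class Config, update_forward_refs) that the new task_tree_schema.py relies on.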

test/conftest.py (new file, 31 lines)
@@ -0,0 +1,31 @@
import os
from typing import Generator

import pytest


def input_generator(input_sequence: list) -> Generator[str, None, None]:
    """
    Creates a generator that yields input strings from the given sequence.

    :param input_sequence: A list of input strings.
    :return: A generator that yields input strings.
    """
    yield from input_sequence


@pytest.fixture
def mock_input_sequence(request, monkeypatch) -> None:
    # patch builtins.input so that each call returns the next string of the parametrized sequence
    gen = input_generator(request.param)
    monkeypatch.setattr("builtins.input", lambda _: next(gen))


@pytest.fixture
def microservice_dir(tmpdir) -> str:
    """
    Builds the path of a temporary directory for a microservice.

    :param tmpdir: A temporary directory.
    :return: The path of the temporary directory.
    """
    return os.path.join(str(tmpdir), "microservice")
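How the two fixtures are meant to be combined, as the updated tests below do (a minimal sketch; the parameter ['y'] answers a single confirmation prompt):

```
@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
def test_example(mock_input_sequence):
    # with indirect=True, request.param inside the fixture is ['y'],
    # so the first input() call made by the code under test returns 'y'
    assert input('continue? ') == 'y'
```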
@@ -7,7 +7,8 @@ from dev_gpt.options.generate.generator import Generator

 # The cognitive difficulty level is determined by the number of requirements the microservice has.

-def test_generation_level_0(tmpdir):
+@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
+def test_generation_level_0(microservice_dir, mock_input_sequence):
     """
     Requirements:
     coding challenge: ❌
@@ -20,15 +21,15 @@ def test_generation_level_0(tmpdir):
     os.environ['VERBOSE'] = 'true'
     generator = Generator(
         "The microservice is very simple, it does not take anything as input and only outputs the word 'test'",
-        str(tmpdir),
+        microservice_dir,
         'gpt-3.5-turbo'
     )
     assert generator.generate() == 0


-def test_generation_level_1(tmpdir):
+@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
+def test_generation_level_1(microservice_dir):
     """
     Requirements:
     coding challenge: ❌
@@ -44,13 +45,14 @@ def test_generation_level_1(tmpdir):
     Example tweet:
     \'When your coworker microwaves fish in the break room... AGAIN. 🐟🤢
     But hey, at least SOMEONE's enjoying their lunch. #officelife\'''',
-        str(tmpdir),
+        str(microservice_dir),
         'gpt-3.5-turbo'
     )
     assert generator.generate() == 0


-def test_generation_level_2(tmpdir):
+@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
+def test_generation_level_2(microservice_dir):
     """
     Requirements:
     coding challenge: ❌
@@ -63,12 +65,13 @@ def test_generation_level_2(tmpdir):
     os.environ['VERBOSE'] = 'true'
     generator = Generator(
         "The input is a PDF like https://www.africau.edu/images/default/sample.pdf and the output is the summarized text (50 words).",
-        str(tmpdir),
+        str(microservice_dir),
         'gpt-3.5-turbo'
     )
     assert generator.generate() == 0

-def test_generation_level_3(tmpdir):
+@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
+def test_generation_level_3(microservice_dir):
     """
     Requirements:
     coding challenge: ✅ (calculate the average closing price)
@@ -87,12 +90,13 @@ def test_generation_level_3(tmpdir):
     4. Return the summary as a string.
     Example input: 'AAPL'
     ''',
-        str(tmpdir),
+        str(microservice_dir),
         'gpt-3.5-turbo'
     )
     assert generator.generate() == 0

-def test_generation_level_4(tmpdir):
+@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
+def test_generation_level_4(microservice_dir):
     """
     Requirements:
     coding challenge: ❌
@@ -123,13 +127,13 @@ print('This is the text from the audio file:', response.json()['text'])
     4. Return the audio file as base64 encoded binary.
     Example input file: https://www.signalogic.com/melp/EngSamples/Orig/ENG_M.wav
     ''',
-        str(tmpdir),
+        str(microservice_dir),
         'gpt-4'
     )
     assert generator.generate() == 0


-def test_generation_level_5(tmpdir):
+@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
+def test_generation_level_5(microservice_dir):
     """
     Requirements:
     coding challenge: ✅ (putting text on the image)
@@ -163,15 +167,30 @@ The joke is then put on the image.
 The output is the image with the joke on it.
 Example input image: https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/560px-PNG_transparency_demonstration_1.png
     ''',
-        str(tmpdir),
+        str(microservice_dir),
         'gpt-3.5-turbo'
     )
     assert generator.generate() == 0

-@pytest.fixture
-def tmpdir():
-    return 'microservice'
+# @pytest.fixture
+# def microservice_dir():
+#     return 'microservice'


-# further ideas:
-# Create a wrapper around google called Joogle. It modifies the page summary preview text of the search results to insert the word Jina as much as possible.
+# # further ideas:
+# # Create a wrapper around google called Joogle. It modifies the page summary preview text of the search results to insert the word Jina as much as possible.
+#
+# import pytest
+#
+# # This is your fixture which can accept parameters
+# @pytest.fixture
+# def my_fixture(microservice_dir, request):
+#     return request.param  # request.param will contain the parameter value
+#
+# # Here you parameterize the fixture for the test
+# @pytest.mark.parametrize('my_fixture', ['param1', 'param2', 'param3'], indirect=True)
+# def test_my_function(my_fixture, microservice_dir):
+#     # 'my_fixture' now contains the value 'param1', 'param2', or 'param3'
+#     # depending on the iteration
+#     # Here you can write your test
+#     ...

test/unit/test_construct_sub_task_tree.py (new file, 11 lines)
@@ -0,0 +1,11 @@
import os

from dev_gpt.apis import gpt
from dev_gpt.options.generate.pm.pm import PM


def test_construct_sub_task_tree():
    os.environ['VERBOSE'] = 'true'
    gpt_session = gpt.GPTSession(model='gpt-3.5-turbo')
    pm = PM(gpt_session)
    microservice_description = 'This microservice receives an image as input and generates a joke based on what is depicted on the image. The input must be a binary string of the image. The output is an image with the generated joke overlaid on it.'
    # smoke test: passes if construct_sub_task_tree completes without raising
    sub_task_tree = pm.construct_sub_task_tree(microservice_description)