Merge branch 'main' of github.com:jina-ai/microchain into feat_search_api

This commit is contained in:
Florian Hönicke
2023-05-16 12:53:24 +02:00
4 changed files with 273 additions and 93 deletions

View File

@@ -49,9 +49,8 @@ Your imagination is the limit!
<a href="https://pypistats.org/packages/dev-gpt" target="_blank">
<img src="https://img.shields.io/pypi/dm/dev-gpt?color=%2334D058&label=pypi%20downloads" alt="Downloads">
</a>
<a href="https://discord.gg/tBrFhx384D" target="_blank">
<img src="https://img.shields.io/badge/chat_on-Discord-7289DA?logo=discord&logoColor=white" alt="Discord Chat">
</a>
<a href="https://discord.jina.ai"><img src="https://img.shields.io/discord/1106542220112302130?logo=discord&logoColor=white&style=flat-square"></a>
</p>

View File

@@ -7,6 +7,7 @@ from typing import Callable
from typing import List, Text, Optional
from langchain import PromptTemplate
from langchain.schema import SystemMessage, AIMessage
from pydantic.dataclasses import dataclass
from dev_gpt.apis import gpt
@@ -20,11 +21,14 @@ from dev_gpt.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES,
from dev_gpt.options.generate.pm.pm import PM
from dev_gpt.options.generate.templates_user import template_generate_microservice_name, \
template_generate_possible_packages, \
template_solve_code_issue, \
template_implement_solution_code_issue, \
template_solve_pip_dependency_issue, template_is_dependency_issue, template_generate_playground, \
template_generate_function, template_generate_test, template_generate_requirements, \
template_chain_of_thought, template_summarize_error, \
template_solve_apt_get_dependency_issue
template_solve_apt_get_dependency_issue, \
template_suggest_solutions_code_issue, template_was_error_seen_before, \
template_was_solution_tried_before, response_format_was_error_seen_before, \
response_format_was_solution_tried_before, response_format_suggest_solutions
from dev_gpt.utils.io import persist_file, get_all_microservice_files_with_content, get_microservice_path
from dev_gpt.utils.string_tools import print_colored
@@ -40,6 +44,11 @@ class Generator:
self.gpt_session = gpt.GPTSession(model=model)
self.microservice_specification = TaskSpecification(task=task_description, test=None)
self.microservice_root_path = path
self.microservice_name = None
self.previous_microservice_path = None
self.cur_microservice_path = None
self.previous_errors = []
self.previous_solutions = []
@staticmethod
def extract_content_from_result(plain_text, file_name, match_single_block=False, can_contain_code_block=True):
@@ -88,9 +97,11 @@ metas:
self,
section_title: str,
template: PromptTemplate,
destination_folder: str,
destination_folder: str = None,
file_name_s: List[str] = None,
parse_result_fn: Callable = None,
use_custom_system_message: bool = True,
response_format_example: str = None,
**template_kwargs
):
"""This function generates file(s) using the given template and persists it/them in the given destination folder.
@@ -99,22 +110,32 @@ metas:
Args:
section_title (str): The title of the section to be printed in the console.
template (PromptTemplate): The template to be used for generating the file(s).
destination_folder (str): The destination folder where the generated file(s) should be persisted.
destination_folder (str): The destination folder where the generated file(s) should be persisted. If None,
the current microservice path is used. Defaults to None.
file_name_s (List[str], optional): The name of the file(s) to be generated. Defaults to None.
parse_result_fn (Callable, optional): A function that parses the generated content and returns a dictionary
mapping file_name to its content. If no content could be extracted, it returns an empty dictionary.
Defaults to None. If None, default parsing is used which uses the file_name to extract from the generated content.
use_custom_system_message (bool, optional): whether to use custom system message or not. Defaults to True.
**template_kwargs: The keyword arguments to be passed to the template.
"""
if destination_folder is None:
destination_folder = self.cur_microservice_path
if parse_result_fn is None:
parse_result_fn = self.get_default_parse_result_fn(file_name_s)
print_colored('', f'\n\n############# {section_title} #############', 'blue')
system_introduction_message = _GPTConversation._create_system_message(
self.microservice_specification.task,
self.microservice_specification.test
if use_custom_system_message:
system_introduction_message = _GPTConversation._create_system_message(
self.microservice_specification.task,
self.microservice_specification.test
)
else:
system_introduction_message = SystemMessage(content='You are a helpful assistant.')
conversation = self.gpt_session.get_conversation(
messages=[system_introduction_message] if use_custom_system_message else []
)
conversation = self.gpt_session.get_conversation(messages=[system_introduction_message])
template_kwargs = {k: v for k, v in template_kwargs.items() if k in template.input_variables}
if 'file_name' in template.input_variables and len(file_name_s) == 1:
template_kwargs['file_name'] = file_name_s[0]
@@ -125,8 +146,33 @@ metas:
)
content = parse_result_fn(content_raw)
if content == {}:
conversation = self.gpt_session.get_conversation(
messages=[SystemMessage(content='You are a helpful assistant.'), AIMessage(content=content_raw)]
)
if response_format_example is not None:
file_wrapping_example = response_format_example
elif len(file_name_s) == 1:
file_ending = file_name_s[0].split('.')[-1]
if file_ending == 'py':
tag = 'python'
elif file_ending == 'json':
tag = 'json'
else:
tag = ''
file_wrapping_example = f'''**{file_name_s[0]}**
```{tag}
<content_of_file>
```'''
else:
file_wrapping_example = '''**file_name.file_ending**
```<json|py|...
<content_of_file>
```'''
content_raw = conversation.chat(
'You must add the content in the format shown above' + (f' for {file_name_s[0]}' if len(file_name_s) == 1 else ''))
'Based on your previous response, only output the content' + (f' for `{file_name_s[0]}`' if len(file_name_s) == 1 else '') +
'. Like this:\n' +
file_wrapping_example
)
content = parse_result_fn(content_raw)
for _file_name, _file_content in content.items():
persist_file(_file_content, os.path.join(destination_folder, _file_name))
@@ -134,27 +180,26 @@ metas:
def generate_microservice(
self,
microservice_name,
packages,
num_approach,
):
MICROSERVICE_FOLDER_v1 = get_microservice_path(self.microservice_root_path, microservice_name, packages,
num_approach, 1)
os.makedirs(MICROSERVICE_FOLDER_v1)
self.cur_microservice_path = get_microservice_path(
self.microservice_root_path, self.microservice_name, packages, num_approach, 1
)
os.makedirs(self.cur_microservice_path)
with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'jina_wrapper.py'), 'r', encoding='utf-8') as f:
microservice_executor_boilerplate = f.read()
microservice_executor_code = microservice_executor_boilerplate.replace('class DevGPTExecutor(Executor):',
f'class {microservice_name}(Executor):')
persist_file(microservice_executor_code, os.path.join(MICROSERVICE_FOLDER_v1, EXECUTOR_FILE_NAME))
microservice_executor_code = microservice_executor_boilerplate \
.replace('class DevGPTExecutor(Executor):', f'class {self.microservice_name}(Executor):')
persist_file(microservice_executor_code, os.path.join(self.cur_microservice_path, EXECUTOR_FILE_NAME))
with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'apis.py'), 'r', encoding='utf-8') as f:
persist_file(f.read(), os.path.join(MICROSERVICE_FOLDER_v1, 'apis.py'))
persist_file(f.read(), os.path.join(self.cur_microservice_path, 'apis.py'))
microservice_content = self.generate_and_persist_file(
section_title='Microservice',
template=template_generate_function,
destination_folder=MICROSERVICE_FOLDER_v1,
microservice_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
packages=packages,
@@ -166,9 +211,8 @@ metas:
test_microservice_content = self.generate_and_persist_file(
'Test Microservice',
template_generate_test,
MICROSERVICE_FOLDER_v1,
code_files_wrapped=self.files_to_string({EXECUTOR_FILE_NAME: microservice_content}),
microservice_name=microservice_name,
microservice_name=self.microservice_name,
microservice_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
file_name_purpose=TEST_EXECUTOR_FILE_NAME,
@@ -176,10 +220,9 @@ metas:
file_name_s=[TEST_EXECUTOR_FILE_NAME],
)[TEST_EXECUTOR_FILE_NAME]
requirements_content = self.generate_and_persist_file(
self.generate_and_persist_file(
'Requirements',
template_generate_requirements,
MICROSERVICE_FOLDER_v1,
code_files_wrapped=self.files_to_string({
IMPLEMENTATION_FILE_NAME: microservice_content,
TEST_EXECUTOR_FILE_NAME: test_microservice_content,
@@ -188,21 +231,7 @@ metas:
file_name_s=[REQUIREMENTS_FILE_NAME],
parse_result_fn=self.parse_result_fn_requirements,
tag_name=REQUIREMENTS_FILE_TAG,
)[REQUIREMENTS_FILE_NAME]
# I deactivated this because 3.5-turbo was hallucinating packages that were not needed
# now, in the first iteration the default dockerfile is used
# self.generate_and_persist_file(
# section_title='Generate Dockerfile',
# template=template_generate_apt_get_install,
# destination_folder=MICROSERVICE_FOLDER_v1,
# file_name_s=None,
# parse_result_fn=self.parse_result_fn_dockerfile,
# docker_file_wrapped=self.read_docker_template(),
# requirements_file_wrapped=self.files_to_string({
# REQUIREMENTS_FILE_NAME: requirements_content,
# })
# )
)
with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'Dockerfile'), 'r',
encoding='utf-8') as f:
@@ -212,9 +241,9 @@ metas:
for line in docker_file_template_lines
]
docker_file_content = '\n'.join(docker_file_template_lines)
persist_file(docker_file_content, os.path.join(MICROSERVICE_FOLDER_v1, 'Dockerfile'))
persist_file(docker_file_content, os.path.join(self.cur_microservice_path, 'Dockerfile'))
self.write_config_yml(microservice_name, MICROSERVICE_FOLDER_v1)
self.write_config_yml(self.microservice_name, self.cur_microservice_path)
print('\nFirst version of the microservice generated. Start iterating on it to make the tests pass...')
@@ -244,15 +273,15 @@ pytest
{os.linesep.join(lines)}'''
return {REQUIREMENTS_FILE_NAME: content_modified}
def generate_playground(self, microservice_name, microservice_path):
def generate_playground(self):
print_colored('', '\n\n############# Playground #############', 'blue')
file_name_to_content = get_all_microservice_files_with_content(microservice_path)
file_name_to_content = get_all_microservice_files_with_content(self.cur_microservice_path)
conversation = self.gpt_session.get_conversation()
conversation.chat(
template_generate_playground.format(
code_files_wrapped=self.files_to_string(file_name_to_content, ['test_microservice.py']),
microservice_name=microservice_name,
microservice_name=self.microservice_name,
)
)
playground_content_raw = conversation.chat(
@@ -269,12 +298,12 @@ pytest
content_raw, 'app.py', match_single_block=True
)
gateway_path = os.path.join(microservice_path, 'gateway')
gateway_path = os.path.join(self.cur_microservice_path, 'gateway')
shutil.copytree(os.path.join(os.path.dirname(__file__), 'static_files', 'gateway'), gateway_path)
persist_file(playground_content, os.path.join(gateway_path, 'app.py'))
# fill-in name of microservice
gateway_name = f'Gateway{microservice_name}'
gateway_name = f'Gateway{self.microservice_name}'
custom_gateway_path = os.path.join(gateway_path, 'custom_gateway.py')
with open(custom_gateway_path, 'r', encoding='utf-8') as f:
custom_gateway_content = f.read()
@@ -292,40 +321,38 @@ pytest
print('Final step...')
hubble_log = push_executor(gateway_path)
if not is_executor_in_hub(gateway_name):
raise Exception(f'{microservice_name} not in hub. Hubble logs: {hubble_log}')
raise Exception(f'{self.microservice_name} not in hub. Hubble logs: {hubble_log}')
def debug_microservice(self, microservice_name, num_approach, packages):
def debug_microservice(self, num_approach, packages):
for i in range(1, MAX_DEBUGGING_ITERATIONS):
print('Debugging iteration', i)
print('Trying to debug the microservice. Might take a while...')
previous_microservice_path = get_microservice_path(self.microservice_root_path, microservice_name, packages,
num_approach, i)
next_microservice_path = get_microservice_path(self.microservice_root_path, microservice_name, packages,
num_approach, i + 1)
clean_requirements_txt(previous_microservice_path)
log_hubble = push_executor(previous_microservice_path)
clean_requirements_txt(self.cur_microservice_path)
log_hubble = push_executor(self.cur_microservice_path)
error = process_error_message(log_hubble)
if error:
print('An error occurred during the build process. Feeding the error back to the assistant...')
self.do_debug_iteration(error, next_microservice_path, previous_microservice_path)
self.previous_microservice_path = self.cur_microservice_path
self.cur_microservice_path = get_microservice_path(
self.microservice_root_path, self.microservice_name, packages, num_approach, i + 1
)
os.makedirs(self.cur_microservice_path)
self.do_debug_iteration(error)
if i == MAX_DEBUGGING_ITERATIONS - 1:
raise self.MaxDebugTimeReachedException('Could not debug the microservice.')
else:
# at the moment, there can be cases where no error log is extracted but the executor is still not published
# it leads to problems later on when someone tries a run or deployment
if is_executor_in_hub(microservice_name):
if is_executor_in_hub(self.microservice_name):
print('Successfully build microservice.')
break
else:
raise Exception(f'{microservice_name} not in hub. Hubble logs: {log_hubble}')
raise Exception(f'{self.microservice_name} not in hub. Hubble logs: {log_hubble}')
return get_microservice_path(self.microservice_root_path, microservice_name, packages, num_approach, i)
def do_debug_iteration(self, error, next_microservice_path, previous_microservice_path):
os.makedirs(next_microservice_path)
file_name_to_content = get_all_microservice_files_with_content(previous_microservice_path)
def do_debug_iteration(self, error):
file_name_to_content = get_all_microservice_files_with_content(self.previous_microservice_path)
for file_name, content in file_name_to_content.items():
persist_file(content, os.path.join(next_microservice_path, file_name))
persist_file(content, os.path.join(self.cur_microservice_path, file_name))
summarized_error = self.summarize_error(error)
dock_req_string = self.files_to_string({
@@ -338,7 +365,6 @@ pytest
self.generate_and_persist_file(
section_title='Debugging apt-get dependency issue',
template=template_solve_apt_get_dependency_issue,
destination_folder=next_microservice_path,
file_name_s=['apt-get-packages.json'],
parse_result_fn=self.parse_result_fn_dockerfile,
summarized_error=summarized_error,
@@ -351,24 +377,86 @@ pytest
self.generate_and_persist_file(
section_title='Debugging pip dependency issue',
template=template_solve_pip_dependency_issue,
destination_folder=next_microservice_path,
file_name_s=[REQUIREMENTS_FILE_NAME],
summarized_error=summarized_error,
all_files_string=dock_req_string,
)
else:
all_files_string = self.files_to_string(
{key: val for key, val in file_name_to_content.items() if key != EXECUTOR_FILE_NAME}
)
suggested_solution = self.generate_solution_suggestion(summarized_error, all_files_string)
self.generate_and_persist_file(
section_title='Debugging code issue',
template=template_solve_code_issue,
destination_folder=next_microservice_path,
section_title='Implementing suggestion solution for code issue',
template=template_implement_solution_code_issue,
file_name_s=[IMPLEMENTATION_FILE_NAME, TEST_EXECUTOR_FILE_NAME, REQUIREMENTS_FILE_NAME],
summarized_error=summarized_error,
task_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
all_files_string=self.files_to_string(
{key: val for key, val in file_name_to_content.items() if key != EXECUTOR_FILE_NAME}),
all_files_string=all_files_string,
suggested_solution=suggested_solution,
)
self.previous_errors.append(summarized_error)
self.previous_solutions.append(suggested_solution)
def generate_solution_suggestion(self, summarized_error, all_files_string):
suggested_solutions = json.loads(
self.generate_and_persist_file(
section_title='Suggest solution for code issue',
template=template_suggest_solutions_code_issue,
file_name_s=['solutions.json'],
summarized_error=summarized_error,
task_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
all_files_string=all_files_string,
response_format_example=response_format_suggest_solutions,
)['solutions.json']
)
if len(self.previous_errors) > 0:
was_error_seen_before = json.loads(
self.generate_and_persist_file(
section_title='Check if error was seen before',
template=template_was_error_seen_before,
file_name_s=['was_error_seen_before.json'],
summarized_error=summarized_error,
previous_errors='- "' + f'"{os.linesep}- "'.join(self.previous_errors) + '"',
use_custom_system_message=False,
response_format_example=response_format_was_error_seen_before,
)['was_error_seen_before.json']
)['was_error_seen_before'].lower() == 'yes'
suggested_solution = None
if was_error_seen_before:
for _num_solution in range(1, len(suggested_solutions) + 1):
_suggested_solution = suggested_solutions[str(_num_solution)]
was_solution_tried_before = json.loads(
self.generate_and_persist_file(
section_title='Check if solution was tried before',
template=template_was_solution_tried_before,
file_name_s=['will_lead_to_different_actions.json'],
tried_solutions='- "' + f'"{os.linesep}- "'.join(self.previous_solutions) + '"',
suggested_solution=_suggested_solution,
use_custom_system_message=False,
response_format_example=response_format_was_solution_tried_before,
)['will_lead_to_different_actions.json']
)['will_lead_to_different_actions'].lower() == 'no'
if not was_solution_tried_before:
suggested_solution = _suggested_solution
break
else:
suggested_solution = suggested_solutions['1']
if suggested_solution is None:
suggested_solution = f"solve error: {summarized_error}"
else:
suggested_solution = suggested_solutions['1']
return suggested_solution
class MaxDebugTimeReachedException(BaseException):
pass
@@ -425,13 +513,13 @@ pytest
self.microservice_specification.task, self.microservice_specification.test = PM().refine_specification(self.microservice_specification.task)
os.makedirs(self.microservice_root_path)
generated_name = self.generate_microservice_name(self.microservice_specification.task)
microservice_name = f'{generated_name}{random.randint(0, 10_000_000)}'
self.microservice_name = f'{generated_name}{random.randint(0, 10_000_000)}'
packages_list = self.get_possible_packages()
for num_approach, packages in enumerate(packages_list):
try:
self.generate_microservice(microservice_name, packages, num_approach)
final_version_path = self.debug_microservice(microservice_name, num_approach, packages)
self.generate_playground(microservice_name, final_version_path)
self.generate_microservice(packages, num_approach)
self.debug_microservice(num_approach, packages)
self.generate_playground()
except self.MaxDebugTimeReachedException:
print('Could not debug the Microservice with the approach:', packages)
if num_approach == len(packages_list) - 1:

View File

@@ -37,10 +37,15 @@ The executor name must fulfill the following criteria:
- only consists of lower and upper case characters
- end with Executor.
The output is a the raw string wrapped into ``` and starting with **name.txt** like this:
Your response must exactly match the following block code format (double asterisks for the file name and triple backticks for the file block):
**name.txt**
```
PDFParserExecutor
<name here>
```
Example for: "Get a png as input and return a vectorized version as svg.":
**name.txt**
```
PngToSvgExecutor
```'''
)
@@ -62,7 +67,7 @@ e) the implementation of the core problem using the package would obey the follo
When answering, just write "yes" or "no".
4. For each approach, list the required python package combinations as described in the following.
You must output the package combinations as json wrapped into tripple backticks ``` and name it **strategies.json**. \
You must output the package combinations as json wrapped into triple backticks ``` and name it **strategies.json**. \
Note that you can also leave a list empty to indicate that one of the strategies does not require any package and can be done in plain python.
Write the output using double asterisks and triple backticks like this:
**strategies.json**
@@ -78,7 +83,7 @@ Write the output using double asterisks and triple backticks like this:
template_code_wrapping_string = '''The code will go into {file_name_purpose}.
Note that you must obey the double asterisk and tripple backtick syntax from like this:
Note that you must obey the double asterisk and triple backtick syntax from like this:
**{file_name}**
```{tag_name}
...code...
@@ -206,10 +211,11 @@ The output would be:
template_summarize_error = PromptTemplate.from_template(
'''Here is an error message I encountered during the docker build process:
'''Your task is to condense an error encountered during the docker build process. The error message is as follows:
"{error}"
Your task is to summarize the error message as compact and informative as possible \
while maintaining all information necessary to debug the core issue (100 words).
It should also provide some additional context regarding the specific file and line number where the error occurred. \
Note that you must not suggest a solution to the error.
Warnings are not worth mentioning.'''
)
@@ -234,7 +240,7 @@ Is this error happening because a PACKAGE_MANAGER package is missing or failed t
```json
{{"dependency_installation_failure": "<yes/no>"}}
```
Note that you must obey the double asterisk and tripple backtick syntax from above.
Note that you must obey the double asterisk and triple backtick syntax from above.
'''
)
@@ -294,14 +300,23 @@ The output is:
```json
{{"packages": [libgl1-mesa-glx]}}
```
Note that you must not output the content of any other files like the Dockerfile or requirements.txt.
Only output the apt-get-packages.json file.
Note that the first line you output must be: **apt-get-packages.json**
Only output content of the apt-get-packages.json file. Ensure the response can be parsed by Python json.loads
Note: you must not output the content of any other file. Especially don't output the Dockerfile or requirements.txt.
Note: the first line you output must be: **apt-get-packages.json**
'''
)
template_solve_code_issue = PromptTemplate.from_template(
response_format_suggest_solutions = '''**solutions.json**
```json
{{
"1": "<best solution>",
"2": "<2nd best solution>"
}}
```'''
template_suggest_solutions_code_issue = PromptTemplate.from_template(
'''General rules:
''' + not_allowed_function_string + '''
@@ -317,14 +332,72 @@ Here are all the files I use:
Here is the summary of the error that occurred:
{summarized_error}
To solve this error, you should:
1. Suggest 3 to 5 possible solutions on how to solve it. You have no access to the documentation of the package.
2. Decide for the best solution and explain it in detail.
3. Write down the files that need to be changed, but not files that don't need to be changed.
Note that any changes needed to make the test pass must be written under the constraint that ''' + IMPLEMENTATION_FILE_NAME + ''' will be used in a different file as well.
You should suggest 3 to 5 possible solution approaches on how to solve it.
Obey the following rules:
Do not implement the solution.
You have no access to the documentation of the package.
You must not change the Dockerfile.
Note that any changes needed to make the test pass must be written under the constraint that ''' + IMPLEMENTATION_FILE_NAME + ''' will be used in a different file as well.
''' + f'{not_allowed_function_string}\n{not_allowed_docker_string}\n{gpt_35_turbo_usage_string}' + '''
After thinking about the possible solutions, output them as JSON ranked from best to worst. Like this:
''' + response_format_suggest_solutions + '''
Ensure the response can be parsed by Python json.loads'''
)
response_format_was_error_seen_before = '''**was_error_seen_before.json**
```json
{{"was_error_seen_before": "<yes/no>"}}
```'''
template_was_error_seen_before = PromptTemplate.from_template(
'''Previously encountered error messages:
{previous_errors}
Now encountered error message: "{summarized_error}"
Was this error message encountered before?
Write down your final answer as json in the following format:
''' + response_format_was_error_seen_before + '''
Note that you must obey the double asterisk and triple backtick syntax from above. Ensure the response can be parsed by Python json.loads
'''
)
response_format_was_solution_tried_before = '''**will_lead_to_different_actions.json**
```json
{{"will_lead_to_different_actions": "<yes/no>"}}
```'''
template_was_solution_tried_before = PromptTemplate.from_template(
'''Previously tried solutions:
{tried_solutions}
Suggested solution: "{suggested_solution}"
Will the suggested solution lead to different actions than the previously tried solutions?
Write down your final answer as json in the following format:
''' + response_format_was_solution_tried_before + '''
Note that you must obey the double asterisk and triple backtick syntax from above. Ensure the response can be parsed by Python json.loads'''
)
template_implement_solution_code_issue = PromptTemplate.from_template(
'''Here is the description of the task the function must solve:
{task_description}
Here is the test scenario the function must pass:
{test_description}
Here are all the files I use:
{all_files_string}
Implement the suggested solution: {suggested_solution}
Output all the files that need change. You must not change the Dockerfile.
Don't output files that don't need change. If you output a file, then write the complete file.
Use the exact following syntax to wrap the code:
@@ -336,7 +409,7 @@ Use the exact following syntax to wrap the code:
Example:
**microservice.py**
**implementation.py**
```python
import json
@@ -427,7 +500,7 @@ Or write the detailed microservice description all mentioned code samples, docum
}}
```
Note that your response must be either prompt.json or final.json. You must not write both.
Note that you must obey the double asterisk and tripple backtick syntax from above.
Note that you must obey the double asterisk and triple backtick syntax from above.
Note that the last sequence of characters in your response must be ``` (triple backtick).
Note that prompt.json must not only contain one question.
Note that if urls, secrets, database names, etc. are mentioned, they must be part of the summary.
@@ -471,7 +544,7 @@ Example for the case where the example is already mentioned in the refined descr
}}
```
Note that your response must be either prompt.json or final.json. You must not write both.
Note that you must obey the double asterisk and tripple backtick syntax from above.
Note that you must obey the double asterisk and triple backtick syntax from above.
Note that the last sequence of characters in your response must be ``` (triple backtick).
Note that your response must start with the character sequence ** (double asterisk).
Note that prompt.json must only contain one question.

View File

@@ -70,6 +70,26 @@ def test_generation_level_2(microservice_dir, mock_input_sequence):
)
assert generator.generate() == 0
@pytest.mark.parametrize('mock_input_sequence', [['y', 'https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png']], indirect=True)
def test_generation_level_2_svg(microservice_dir, mock_input_sequence):
"""
Requirements:
coding challenge: ✅
pip packages: ✅
environment: ❌
GPT-3.5-turbo: ❌
APIs: ❌
Databases: ❌
"""
os.environ['VERBOSE'] = 'true'
generator = Generator(
"Get a png as input and return a vectorized version as svg.",
str(microservice_dir),
'gpt-3.5-turbo'
)
assert generator.generate() == 0
@pytest.mark.parametrize('mock_input_sequence', [['y', 'yfinance.Ticker("MSFT").info']], indirect=True)
def test_generation_level_3(microservice_dir, mock_input_sequence):
"""