Mirror of https://github.com/aljazceru/dev-gpt.git, synced 2025-12-24 17:14:18 +01:00
Merge branch 'main' of github.com:jina-ai/microchain into feat_pm_role

Conflicts:
	src/apis/gpt.py
	src/options/generate/generator.py
src/apis/gpt.py
@@ -16,8 +16,7 @@ from urllib3.exceptions import InvalidChunkLength
 from src.constants import PRICING_GPT4_PROMPT, PRICING_GPT4_GENERATION, PRICING_GPT3_5_TURBO_PROMPT, \
     PRICING_GPT3_5_TURBO_GENERATION, CHARS_PER_TOKEN
-from src.options.generate.templates_system import template_system_message_base, executor_example, docarray_example, \
-    client_example, gpt_example
+from src.options.generate.templates_system import template_system_message_base
 from src.utils.string_tools import print_colored
@@ -143,3 +142,14 @@ class _GPTConversation:
         self.cost_callback(sum([len(m.content) for m in self.messages]), len(response.content), self.print_costs)
         self.messages.append(response)
         return response.content
+
+    @staticmethod
+    def _create_system_message(task_description, test_description, system_definition_examples: List[str] = []) -> SystemMessage:
+        if system_definition_examples is None:
+            return None
+
+        system_message = PromptTemplate.from_template(template_system_message_base).format(
+            task_description=task_description,
+            test_description=test_description,
+        )
+        return SystemMessage(content=system_message)
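For readers skimming the diff: the new `_create_system_message` helper formats the base system template and wraps it in a `SystemMessage`. A minimal standalone sketch of its behavior (the template text below is a stand-in, not the real `template_system_message_base`):

```python
# Standalone sketch of _create_system_message; the template text is a stand-in,
# not the real template_system_message_base from templates_system.py.
from langchain import PromptTemplate
from langchain.schema import SystemMessage

template_system_message_base = 'Task: {task_description}\nTest: {test_description}'  # stand-in

def create_system_message(task_description, test_description, system_definition_examples=[]):
    if system_definition_examples is None:
        return None  # passing None suppresses the system message entirely
    content = PromptTemplate.from_template(template_system_message_base).format(
        task_description=task_description,
        test_description=test_description,
    )
    return SystemMessage(content=content)

print(create_system_message('split an mp3 in two halves', 'returns two files').content)
```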
src/constants.py
@@ -1,4 +1,5 @@
-EXECUTOR_FILE_NAME = 'microservice.py'
+EXECUTOR_FILE_NAME = '__init__.py'
+IMPLEMENTATION_FILE_NAME = 'implementation.py'
 TEST_EXECUTOR_FILE_NAME = 'test_microservice.py'
 REQUIREMENTS_FILE_NAME = 'requirements.txt'
 DOCKER_FILE_NAME = 'Dockerfile'
@@ -6,6 +7,7 @@ CLIENT_FILE_NAME = 'client.py'
 STREAMLIT_FILE_NAME = 'streamlit.py'

 EXECUTOR_FILE_TAG = 'python'
+IMPLEMENTATION_FILE_TAG = 'python'
 TEST_EXECUTOR_FILE_TAG = 'python'
 REQUIREMENTS_FILE_TAG = ''
 DOCKER_FILE_TAG = 'dockerfile'
@@ -15,6 +17,7 @@ STREAMLIT_FILE_TAG = 'python'

 FILE_AND_TAG_PAIRS = [
     (EXECUTOR_FILE_NAME, EXECUTOR_FILE_TAG),
+    (IMPLEMENTATION_FILE_NAME, IMPLEMENTATION_FILE_TAG),
     (TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG),
     (REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG),
     (DOCKER_FILE_NAME, DOCKER_FILE_TAG),
@@ -38,7 +41,7 @@ DEMO_TOKEN = '45372338e04f5a41af949024db929d46'

 PROBLEMATIC_PACKAGES = [
     # 'Pyrender', 'Trimesh',
-    'ModernGL', 'PyOpenGL', 'Pyglet', 'pythreejs', 'panda3d',  # because they need a screen
+    'moderngl', 'pyopengl', 'pyglet', 'pythreejs', 'panda3d',  # because they need a screen
 ]

-UNNECESSARY_PACKAGES = ['FastAPI']
+UNNECESSARY_PACKAGES = ['fastapi']
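The `FILE_AND_TAG_PAIRS` constants drive how generated files are pulled out of GPT responses that wrap each file as `**file_name**` plus a fenced code block. A hypothetical extractor showing the idea (the repo's actual parser may differ):

```python
# Hypothetical sketch of how a (file_name, tag) pair from FILE_AND_TAG_PAIRS can
# be used to pull one file out of a GPT response; dev-gpt's real parser may differ.
import re

FILE_AND_TAG_PAIRS = [('implementation.py', 'python'), ('requirements.txt', '')]

def extract_file(response: str, file_name: str, tag: str):
    # matches blocks like: **implementation.py**\n```python\n<content>\n```
    pattern = rf'\*\*{re.escape(file_name)}\*\*\n```{tag}\n(.*?)```'
    match = re.search(pattern, response, re.DOTALL)
    return match.group(1).strip() if match else None

response = "**implementation.py**\n```python\ndef func(d): return d\n```"
print(extract_file(response, 'implementation.py', 'python'))  # def func(d): return d
```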
@@ -1,5 +1,8 @@
 import os

+from src.constants import REQUIREMENTS_FILE_NAME, DOCKER_FILE_NAME, IMPLEMENTATION_FILE_NAME, TEST_EXECUTOR_FILE_NAME
+
+
 def list_dirs_no_hidden(path):
     """
     List all non-hidden directories in the specified path.
@@ -44,11 +47,11 @@ def validate_folder_is_correct(microservice_path):
     latest_version_path = get_latest_version_path(microservice_path)
     required_files = [
         'gateway/app.py',
-        'requirements.txt',
-        'Dockerfile',
+        REQUIREMENTS_FILE_NAME,
+        DOCKER_FILE_NAME,
+        IMPLEMENTATION_FILE_NAME,
+        TEST_EXECUTOR_FILE_NAME,
         'config.yml',
-        'microservice.py',
-        'test_microservice.py',
     ]
     for file_name in required_files:
         if not os.path.exists(os.path.join(latest_version_path, file_name)):
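The validation above checks that a generated microservice folder contains every required file. A minimal standalone sketch of the same checks (illustrative; helper names mirror the diff but this is not the repo's code):

```python
# Minimal sketch of the folder checks above; illustrative, not the repo's code.
import os

def list_dirs_no_hidden(path):
    """List all non-hidden directories in the specified path."""
    return [d for d in os.listdir(path)
            if os.path.isdir(os.path.join(path, d)) and not d.startswith('.')]

def missing_required_files(version_path, required_files):
    # returns the subset of required files that do not exist under version_path
    return [f for f in required_files
            if not os.path.exists(os.path.join(version_path, f))]

print(missing_required_files('.', ['requirements.txt', 'Dockerfile', 'implementation.py']))
```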
src/options/generate/generator.py
@@ -13,13 +13,17 @@ from pydantic.dataclasses import dataclass
 from src.apis import gpt
 from src.apis.jina_cloud import process_error_message, push_executor, is_executor_in_hub
 from src.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES, MAX_DEBUGGING_ITERATIONS, \
-    PROBLEMATIC_PACKAGES, EXECUTOR_FILE_NAME, EXECUTOR_FILE_TAG, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
+    PROBLEMATIC_PACKAGES, EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
     REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, UNNECESSARY_PACKAGES
-from src.options.generate.templates_system import template_system_message_base, gpt_example, executor_example, \
-    docarray_example, client_example, system_task_iteration, system_task_introduction, system_test_iteration
+from src.options.generate.templates_system import template_system_message_base
 from src.options.generate.templates_user import template_generate_microservice_name, \
     template_generate_possible_packages, \
     template_solve_code_issue, \
-    template_solve_pip_dependency_issue, template_is_dependency_issue, template_generate_playground, \
-    template_generate_apt_get_install, template_solve_apt_get_dependency_issue
+    template_solve_pip_dependency_issue, \
+    template_generate_apt_get_install, template_solve_apt_get_dependency_issue, \
+    template_is_dependency_issue, template_generate_playground, \
+    template_generate_function, template_generate_test, template_generate_requirements, \
     template_chain_of_thought, template_summarize_error, \
@@ -55,7 +59,7 @@ class Generator:
         return single_code_block_match[0].strip()
         return ''

-    def write_config_yml(self, class_name, dest_folder, python_file='microservice.py'):
+    def write_config_yml(self, class_name, dest_folder, python_file=EXECUTOR_FILE_NAME):
         config_content = f'''jtype: {class_name}
 py_modules:
   - {python_file}
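For illustration, what `write_config_yml` now emits with the new default `python_file=EXECUTOR_FILE_NAME`; the class name below is invented, and the real f-string continues past the hunk shown (a `metas:` section follows):

```python
# Illustrative only: the config.yml content built by write_config_yml for the
# new default python_file=EXECUTOR_FILE_NAME; class name invented.
class_name = 'MyMicroservice'
python_file = '__init__.py'  # EXECUTOR_FILE_NAME after this change
config_content = f'''jtype: {class_name}
py_modules:
  - {python_file}
'''
print(config_content)
```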
@@ -87,9 +91,9 @@ metas:
             section_title: str,
             template: PromptTemplate,
             destination_folder: str,
-            file_name_s: Union[str, List[str]] = None,
+            file_name_s: List[str] = None,
             parse_result_fn: Callable = None,
-            system_definition_examples: List[str] = ['gpt', 'executor', 'docarray', 'client'],
+            system_definition_examples: List[str] = [],
             **template_kwargs
     ):
         """This function generates file(s) using the given template and persists it/them in the given destination folder.
@@ -99,23 +103,22 @@ metas:
         section_title (str): The title of the section to be printed in the console.
         template (PromptTemplate): The template to be used for generating the file(s).
         destination_folder (str): The destination folder where the generated file(s) should be persisted.
-        file_name_s (Union[str, List[str]], optional): The name of the file(s) to be generated. Defaults to None.
+        file_name_s (List[str], optional): The name of the file(s) to be generated. Defaults to None.
         parse_result_fn (Callable, optional): A function that parses the generated content and returns a dictionary
             mapping file_name to its content. If no content could be extracted, it returns an empty dictionary.
             Defaults to None. If None, default parsing is used which uses the file_name to extract from the generated content.
-        system_definition_examples (List[str], optional): The system definition examples to be used for the conversation.
-            Defaults to ['gpt', 'executor', 'docarray', 'client'].
+        system_definition_examples (List[str], optional): The system definition examples to be used for the conversation. Defaults to [].
         **template_kwargs: The keyword arguments to be passed to the template.
         """
         if parse_result_fn is None:
-            parse_result_fn = self.get_default_parse_result_fn([file_name_s] if isinstance(file_name_s, str) else file_name_s)
+            parse_result_fn = self.get_default_parse_result_fn(file_name_s)

         print_colored('', f'\n\n############# {section_title} #############', 'blue')
         system_introduction_message = self._create_system_message(self.microservice_specification.task, self.microservice_specification.test, system_definition_examples)
         conversation = self.gpt_session.get_conversation(messages=[system_introduction_message])
         template_kwargs = {k: v for k, v in template_kwargs.items() if k in template.input_variables}
-        if 'file_name' in template.input_variables:
-            template_kwargs['file_name'] = file_name_s
+        if 'file_name' in template.input_variables and len(file_name_s) == 1:
+            template_kwargs['file_name'] = file_name_s[0]
         content_raw = conversation.chat(
             template.format(
                 **template_kwargs
@@ -123,7 +126,7 @@ metas:
             )
         content = parse_result_fn(content_raw)
         if content == {}:
-            content_raw = conversation.chat('You must add the content' + (f' for {file_name_s}.' if file_name_s else ''))
+            content_raw = conversation.chat('You must add the content' + (f' for {file_name_s[0]}' if len(file_name_s) == 1 else ''))
             content = parse_result_fn(content_raw)
         for _file_name, _file_content in content.items():
             persist_file(_file_content, os.path.join(destination_folder, _file_name))
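The retry logic above gives the model one more chance when nothing parseable comes back, naming the file only in the single-file case. A sketch of that step, with `conversation` and `parse_result_fn` standing in for the real objects:

```python
# Sketch of the retry step above: if the first reply parses to nothing, nudge
# the model once, naming the file when exactly one file was requested.
# `conversation` and `parse_result_fn` are stand-ins for the real objects.
def chat_and_parse(conversation, prompt, parse_result_fn, file_name_s):
    content_raw = conversation.chat(prompt)
    content = parse_result_fn(content_raw)
    if content == {}:
        suffix = f' for {file_name_s[0]}' if len(file_name_s) == 1 else ''
        content_raw = conversation.chat('You must add the content' + suffix)
        content = parse_result_fn(content_raw)
    return content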
@@ -138,18 +141,25 @@ metas:
         MICROSERVICE_FOLDER_v1 = get_microservice_path(self.microservice_root_path, microservice_name, packages, num_approach, 1)
         os.makedirs(MICROSERVICE_FOLDER_v1)

+        with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'microservice.py'), 'r') as f:
+            microservice_executor_boilerplate = f.read()
+        microservice_executor_code = microservice_executor_boilerplate.replace('class GPTDeployExecutor(Executor):', f'class {microservice_name}(Executor):')
+        persist_file(microservice_executor_code, os.path.join(MICROSERVICE_FOLDER_v1, EXECUTOR_FILE_NAME))
+
+        with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'apis.py'), 'r') as f:
+            persist_file(f.read(), os.path.join(MICROSERVICE_FOLDER_v1, 'apis.py'))
+
         microservice_content = self.generate_and_persist_file(
-            'Microservice',
-            template_generate_executor,
-            MICROSERVICE_FOLDER_v1,
-            microservice_name=microservice_name,
+            section_title='Microservice',
+            template=template_generate_function,
+            destination_folder=MICROSERVICE_FOLDER_v1,
             microservice_description=self.microservice_specification.task,
             test_description=self.microservice_specification.test,
             packages=packages,
-            file_name_purpose=EXECUTOR_FILE_NAME,
-            tag_name=EXECUTOR_FILE_TAG,
-            file_name_s=EXECUTOR_FILE_NAME,
-        )[EXECUTOR_FILE_NAME]
+            file_name_purpose=IMPLEMENTATION_FILE_NAME,
+            tag_name=IMPLEMENTATION_FILE_TAG,
+            file_name_s=[IMPLEMENTATION_FILE_NAME],
+        )[IMPLEMENTATION_FILE_NAME]

         test_microservice_content = self.generate_and_persist_file(
             'Test Microservice',
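The boilerplate step above renames the stock executor class by plain string replacement before persisting it as `EXECUTOR_FILE_NAME`. A toy version (names invented):

```python
# The boilerplate copy step renames the stock executor class with a plain
# string replacement; names here are invented for illustration.
boilerplate = 'class GPTDeployExecutor(Executor):\n    pass'
microservice_name = 'AudioSplitter'
code = boilerplate.replace('class GPTDeployExecutor(Executor):',
                           f'class {microservice_name}(Executor):')
print(code.splitlines()[0])  # class AudioSplitter(Executor):
```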
@@ -161,19 +171,20 @@ metas:
             test_description=self.microservice_specification.test,
             file_name_purpose=TEST_EXECUTOR_FILE_NAME,
             tag_name=TEST_EXECUTOR_FILE_TAG,
-            file_name_s=TEST_EXECUTOR_FILE_NAME,
+            file_name_s=[TEST_EXECUTOR_FILE_NAME],
         )[TEST_EXECUTOR_FILE_NAME]

         requirements_content = self.generate_and_persist_file(
             'Requirements',
             template_generate_requirements,
             MICROSERVICE_FOLDER_v1,
+            system_definition_examples=None,
             code_files_wrapped=self.files_to_string({
-                EXECUTOR_FILE_NAME: microservice_content,
+                IMPLEMENTATION_FILE_NAME: microservice_content,
                 TEST_EXECUTOR_FILE_NAME: test_microservice_content,
             }),
             file_name_purpose=REQUIREMENTS_FILE_NAME,
-            file_name_s=REQUIREMENTS_FILE_NAME,
+            file_name_s=[REQUIREMENTS_FILE_NAME],
             tag_name=REQUIREMENTS_FILE_TAG,
         )[REQUIREMENTS_FILE_NAME]
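`files_to_string` wraps each file for inclusion in a prompt. A helper in the same spirit, using the `**name**` plus fenced-block convention these templates expect (the repo's real implementation may differ):

```python
# A helper in the spirit of files_to_string, using the **name** / fenced-block
# wrapping the prompts in this commit expect; the repo's real code may differ.
def files_to_string(file_name_to_content: dict) -> str:
    return '\n\n'.join(f'**{name}**\n```\n{content}\n```'
                       for name, content in file_name_to_content.items())

print(files_to_string({'implementation.py': 'def func(d): return d'}))
```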
@@ -209,7 +220,7 @@ metas:
         conversation = self.gpt_session.get_conversation()
         conversation.chat(
             template_generate_playground.format(
-                code_files_wrapped=self.files_to_string(file_name_to_content, ['microservice.py', 'test_microservice.py']),
+                code_files_wrapped=self.files_to_string(file_name_to_content, ['test_microservice.py']),
                 microservice_name=microservice_name,
             )
         )
@@ -308,7 +319,7 @@ metas:
             section_title='Debugging pip dependency issue',
             template=template_solve_pip_dependency_issue,
             destination_folder=next_microservice_path,
-            file_name_s=REQUIREMENTS_FILE_NAME,
+            file_name_s=[REQUIREMENTS_FILE_NAME],
             summarized_error=summarized_error,
             all_files_string=dock_req_string,
         )
@@ -317,11 +328,11 @@ metas:
             section_title='Debugging code issue',
             template=template_solve_code_issue,
             destination_folder=next_microservice_path,
-            file_name_s=[EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_NAME, REQUIREMENTS_FILE_NAME],
+            file_name_s=[IMPLEMENTATION_FILE_NAME, TEST_EXECUTOR_FILE_NAME, REQUIREMENTS_FILE_NAME],
             summarized_error=summarized_error,
             task_description=self.microservice_specification.task,
             test_description=self.microservice_specification.test,
-            all_files_string=self.files_to_string(file_name_to_content),
+            all_files_string=self.files_to_string({key: val for key, val in file_name_to_content.items() if key != EXECUTOR_FILE_NAME}),
         )

 class MaxDebugTimeReachedException(BaseException):
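The changed `all_files_string` argument hides the boilerplate executor file from the debugging prompt so only generated files are shown to GPT. Its effect in isolation:

```python
# Effect of the new all_files_string filter: the boilerplate executor file
# (EXECUTOR_FILE_NAME) is excluded from the debugging prompt.
EXECUTOR_FILE_NAME = '__init__.py'
file_name_to_content = {
    '__init__.py': '# boilerplate executor',
    'implementation.py': 'def func(d): return d',
    'requirements.txt': 'pytest',
}
visible = {k: v for k, v in file_name_to_content.items() if k != EXECUTOR_FILE_NAME}
print(sorted(visible))  # ['implementation.py', 'requirements.txt']
```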
@@ -349,7 +360,7 @@ metas:
             section_title='Generate microservice name',
             template=template_generate_microservice_name,
             destination_folder=self.microservice_root_path,
-            file_name_s='name.txt',
+            file_name_s=['name.txt'],
             description=description
         )['name.txt']
         return name
@@ -360,11 +371,11 @@ metas:
             section_title='Generate possible packages',
             template=template_generate_possible_packages,
             destination_folder=self.microservice_root_path,
-            file_name_s='packages.csv',
-            system_definition_examples=['gpt'],
+            file_name_s=['packages.csv'],
+            system_definition_examples=[],
             description=self.microservice_specification.task
         )['packages.csv']
-        packages_list = [[pkg.strip() for pkg in packages_string.split(',')] for packages_string in packages_csv_string.split('\n')]
+        packages_list = [[pkg.strip().lower() for pkg in packages_string.split(',')] for packages_string in packages_csv_string.split('\n')]
         packages_list = [
             packages for packages in packages_list if len(set(packages).intersection(set(PROBLEMATIC_PACKAGES))) == 0
         ]
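The `.lower()` change normalizes the model's package CSV before filtering against the now-lowercased `PROBLEMATIC_PACKAGES`. Worked through on invented CSV input:

```python
# What the lowercasing change does to the model's package CSV; CSV text invented.
PROBLEMATIC_PACKAGES = ['moderngl', 'pyopengl', 'pyglet', 'pythreejs', 'panda3d']

packages_csv_string = 'pydub, ffmpeg-python\nModernGL, numpy'
packages_list = [[pkg.strip().lower() for pkg in line.split(',')]
                 for line in packages_csv_string.split('\n')]
packages_list = [p for p in packages_list
                 if not set(p) & set(PROBLEMATIC_PACKAGES)]
print(packages_list)  # [['pydub', 'ffmpeg-python']]; the ModernGL row is dropped
```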
src/options/generate/static_files/microservice/apis.py (new file)
@@ -0,0 +1,23 @@
+import os
+import openai
+
+
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+
+class GPT_3_5_Turbo_API:
+    def __init__(self, system: str = ''):
+        self.system = system
+
+    def __call__(self, prompt: str) -> str:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[{
+                "role": 'system',
+                "content": self.system
+            }, {
+                "role": 'user',
+                "content": prompt
+            }]
+        )
+        return response.choices[0]['message']['content']
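How generated microservices are meant to call the helper above (prompt and system text invented; requires `OPENAI_API_KEY` in the environment and the openai 0.x `ChatCompletion` API used in the file):

```python
# Usage sketch for GPT_3_5_Turbo_API; prompt and system text are invented.
from apis import GPT_3_5_Turbo_API  # generated code uses `from .apis import ...`

gpt = GPT_3_5_Turbo_API(system='You answer with a JSON object only.')
answer = gpt('Name two primary colors as {"colors": [...]}')
print(answer)  # a plain string; callers json.loads() it themselves
```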
src/options/generate/static_files/microservice/microservice.py (new file)
@@ -0,0 +1,15 @@
+from jina import Executor, requests as jina_requests, DocumentArray
+import json
+
+from .implementation import func
+
+
+class GPTDeployExecutor(Executor):
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    @jina_requests()
+    def endpoint(self, docs: DocumentArray, **kwargs) -> DocumentArray:
+        for d in docs:
+            d.text = json.dumps(func(json.loads(d.text)))
+        return docs
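The new executor boilerplate delegates to `func` from `implementation.py`: JSON text in `Document.text` is decoded to a dict, passed through `func`, and re-encoded. A stand-in `implementation.py` satisfying that contract (logic invented):

```python
# A stand-in implementation.py for the dict-in / dict-out contract; logic invented.
import json

def func(input_dict: dict) -> dict:
    return {'echo': input_dict, 'ok': True}

# round-trip exactly as the endpoint above does it:
d_text = json.dumps({'he_says': 'hello'})
print(json.dumps(func(json.loads(d_text))))
```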
src/options/generate/templates_system.py
@@ -1,95 +1,4 @@
 from src.constants import FLOW_URL_PLACEHOLDER
-from src.options.generate.templates_user import not_allowed_docker_string, not_allowed_executor_string
-
-gpt_example = '''
-# gpt_3_5_turbo is a language model that can be used to generate text.
-You can use it to generate text given a system definition and a user prompt.
-The system definition defines the agent the user is talking to.
-The user prompt is a precise question and the expected answer format.
-Example:
-# in the executor init:
-gpt = GPT_3_5_Turbo_API(
-    system=\'\'\'
-You are a tv-reporter who is specialized in C-list celebrities.
-When you get asked something like 'Who was having a date with <X>?', then you answer with a json like '{"dates": ["<Y>", "<Z>"]}'.
-You must not answer something else - only the json.
-\'\'\')
-
-# in the executor endpoint function:
-response_string = gpt(prompt)
-response = json.loads(response_string)  # parse the returned json string
-'''
-
-executor_example = '''Using the Jina framework, users can define executors.
-Here is an example of how an executor can be defined. It always starts with a comment:
-
-**microservice.py**
-```python
-from jina import Executor, requests, DocumentArray, Document
-import json
-
-class MyInfoExecutor(Executor):
-    def __init__(self, **kwargs):
-        super().__init__()
-
-    @requests()  # each Executor must have exactly this decorator without parameters
-    def foo(self, docs: DocumentArray, **kwargs) -> DocumentArray:
-        for d in docs:
-            content = json.loads(d.text)
-            ...
-            d.text = json.dumps(modified_content)  # serialized json
-        return docs
-```
-
-An Executor gets a DocumentArray as input and returns a DocumentArray as output.
-'''
-
-docarray_example = f'''A DocumentArray is a python class that can be seen as a list of Documents.
-A Document is a python class that represents a single document.
-Here is the protobuf definition of a Document:
-```
-message DocumentProto {{
-  // used to store serialized json data the executor gets and returns
-  string text = 1;
-}}
-```
-
-Here are examples of how a DocumentArray can be defined:
-
-```
-from jina import DocumentArray, Document
-import json
-
-d1 = Document(text=json.dumps({{'he_says': 'hello'}}))
-
-# you can load binary data into a document
-url = 'https://...'
-response = requests.get(url)
-obj_data = response.content
-base64_data = base64.b64encode(obj_data).decode('utf-8')
-d2 = Document(text=json.dumps({{'image': base64_data}}))
-
-array = numpy.array([1, 2, 3])
-array_list = array.tolist()
-d3 = Document(text=json.dumps(array_list))
-d4 = Document()
-d4.text = '{{"uri": "https://.../logo.png"}}'
-```
-'''
-
-
-client_example = f'''After the executor is deployed, it can be called via Jina Client.
-Here is an example of a client file:
-
-**client.py**
-```python
-from jina import Client, Document, DocumentArray
-client = Client(host='{FLOW_URL_PLACEHOLDER}', protocol='http')
-d = Document(uri='...')
-d.load_uri_to_blob()
-response = client.post('/', inputs=DocumentArray([d]))  # the client must be called on '/'
-print(response[0].text)
-```'''
+from src.options.generate.templates_user import not_allowed_docker_string, not_allowed_function_string


 template_system_message_base = f'''It is the year 2021.
@@ -105,7 +14,7 @@ and the following test scenario:
 ```

 You must obey the following rules:
-{not_allowed_executor_string}
+{not_allowed_function_string}
 {not_allowed_docker_string}'''

 system_task_introduction = f'''
src/options/generate/templates_user.py
@@ -1,12 +1,9 @@
 from langchain import PromptTemplate

+from src.constants import IMPLEMENTATION_FILE_NAME
+
 general_guidelines_string = '''The code you write is production ready. Every file starts with comments describing what the code is doing before the first import. Comments can only be written within code blocks.
-Then all imports are listed. It is important to import all modules that could be needed in the Executor code. Always import:
-from jina import Executor, DocumentArray, Document, requests
-import json
-from io import BytesIO
-import requests as req
+Then all imports are listed.

 Start from top-level and then fully implement all methods.'''
@@ -16,17 +13,16 @@ Note that the Dockerfile runs the test_microservice.py during the build process.
 The Dockerfile must not attach a virtual display when running test_microservice.py.'''


-not_allowed_executor_string = '''The executor and the test must not use the GPU.
-The executor and the test must not access a database.
-The executor and the test must not access a display.
-The executor and the test must not access external apis unless it is explicitly mentioned in the description or test case (e.g. by mentioning the api that should be used or by providing a URL to access the data).
-The executor and the test must not load data from the local file system unless it was created by the executor itself.
-The executor and the test must not use a pre-trained model unless it is explicitly mentioned in the description.
-The executor and the test must not train a model.
-The executor and the test must not use any attribute of Document except Document.text.
-The executor and the test must not contain prototype or placeholder implementations.
-The executor and the test must run in a docker container based on debian.
-The executor and the test must use gpt_3_5_turbo_api if the task requires understanding or generating natural language or using any language model. Other language models are not allowed.'''
+not_allowed_function_string = '''The implemented function and the test must not use the GPU.
+The implemented function and the test must not access a database.
+The implemented function and the test must not access a display.
+The implemented function and the test must not access external apis unless it is explicitly mentioned in the description or test case (e.g. by mentioning the api that should be used or by providing a URL to access the data).
+The implemented function and the test must not load data from the local file system unless it was created by the implemented function itself.
+The implemented function and the test must not use a pre-trained model unless it is explicitly mentioned in the description.
+The implemented function and the test must not train a model.
+The implemented function and the test must not contain prototype or placeholder implementations.
+The implemented function and the test must run in a docker container based on debian.
+The implemented function and the test must use gpt_3_5_turbo_api if the task requires understanding or generating natural language or using any language model. Other language models are not allowed.'''
@@ -72,7 +68,7 @@ b) has a stable api among different versions
 c) does not have system requirements
 d) can solve the task when running in a docker container
 e) the implementation of the core problem using the package would obey the following rules:
-''' + not_allowed_executor_string + '''
+''' + not_allowed_function_string + '''

 When answering, just write "yes" or "no".
@@ -91,49 +87,39 @@ template_code_wrapping_string = '''The code will go into {file_name_purpose}. Ma
 You must provide the complete file with the exact same syntax to wrap the code.'''


-template_generate_executor = PromptTemplate.from_template(
+gpt_35_turbo_usage_string = """If you use gpt_3_5_turbo_api, then this is an example on how to use it:
+```
+from .apis import GPT_3_5_Turbo_API
+
+gpt_3_5_turbo_api = GPT_3_5_Turbo_API(
+    system=\'\'\'
+You are a tv-reporter who is specialized in C-list celebrities.
+When you get asked something like 'Who was having a date with <X>?', then you answer with a json like '{{"dates": ["<Y>", "<Z>"]}}'.
+You must not answer something else - only the json.
+\'\'\')
+
+response_string = gpt_3_5_turbo_api(prompt)  # fill-in the prompt (str); the output is a string
+```
+"""
+
+
+template_generate_function = PromptTemplate.from_template(
     general_guidelines_string + '''

-Write the executor called '{microservice_name}'. The name is very important to keep.
-It matches the following description: '{microservice_description}'.
+Write a python function which receives as input a dictionary and outputs a dictionary. The function is called 'func'.
+The function must fulfill: '{microservice_description}'.
 It will be tested with the following scenario: '{test_description}'.
 For the implementation use the following package(s): '{packages}'.

 Obey the following rules:
 Have in mind that d.uri is never a path to a local file. It is always a url.
-''' + not_allowed_executor_string + '''
+''' + not_allowed_function_string + '''

 Your approach:
-1. Identify the core challenge when implementing the executor.
-2. Think about solutions for these challenges. Use gpt_3_5_turbo_api if it is mentioned in the above list of packages.
+1. Identify the core challenge when implementing the function.
+2. Think about solutions for these challenges. If gpt_3_5_turbo_api is mentioned in the above list of packages, then you must use it.
 3. Decide for one of the solutions.
-4. Write the code for the executor. Don't write code for the test.
-If and only if gpt_3_5_turbo_api is in the package list, then you must always include the following code in microservice.py:
-```
-import os
-import openai
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-class GPT_3_5_Turbo_API:
-    def __init__(self, system: str = ''):
-        self.system = system
-
-    def __call__(self, prompt: str) -> str:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=[{{
-                "role": 'system',
-                "content": self.system
-            }}, {{
-                "role": 'user',
-                "content": prompt
-            }}]
-        )
-        return response.choices[0]['message']['content']
-```
-
-''' + template_code_wrapping_string
+4. Write the code for the function. Don't write code for the test.
+''' + gpt_35_turbo_usage_string + '\n' + template_code_wrapping_string
 )
@@ -142,15 +128,15 @@ template_generate_test = PromptTemplate.from_template(

 {code_files_wrapped}

-Write a single test case that tests the following scenario: '{test_description}'. In case the test scenario is not precise enough, test a general case without any assumptions.
+Write a single pytest case that tests the following scenario: '{test_description}'. In case the test scenario is not precise enough, test a general case without any assumptions.
 Start the test with an extensive comment about the test case. If gpt_3_5_turbo_api is used in the executor, then the test must not check the exact output of the executor as it is not deterministic.

-Use the following import to import the executor:
+Use the following import to import the function:
 ```
-from microservice import {microservice_name}
+from .implementation import func
 ```

-''' + not_allowed_executor_string + '''
+''' + not_allowed_function_string + '''
 The test must not open local files.
 The test must not mock a function of the executor.
 The test must not use other data than the one provided in the test scenario.
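For reference, a test in the shape this template requests (scenario and assertion invented; only the import line and the comment-first convention come from the template):

```python
# Test that func returns a dictionary for a simple greeting input; the scenario
# here is invented for illustration.
from .implementation import func

def test_func_returns_dict():
    result = func({'he_says': 'hello'})
    assert isinstance(result, dict)
```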
@@ -164,14 +150,19 @@ template_generate_requirements = PromptTemplate.from_template(

 {code_files_wrapped}

 Write the content of the requirements.txt file.
-Make sure to include pytest.
-Make sure to include openai>=0.26.0.
-Make sure that jina==3.15.1.dev14.
-Make sure that docarray==0.21.0.
-You must not add gpt_3_5_turbo_api to the requirements.txt file.
+The requirements.txt file must include the following packages:
+**requirements.txt**
+```
+jina==3.15.1.dev14
+docarray==0.21.0
+openai>=0.26.0
+pytest
+```
+Add any more packages that are needed to run the code.
+You must not add gpt_3_5_turbo_api to the requirements.txt file.

-All versions are fixed using ~=, ==, <, >, <=, >=. The package versions must not have conflicts.
+All versions are fixed using ~=, ==, <, >, <=, >=. The package versions must not have conflicts. Output only the requirements.txt file.
 ''' + '\n' + template_code_wrapping_string
 )
@@ -258,15 +249,14 @@ Output them as a white space separated list:'''
 )


 template_solve_code_issue = PromptTemplate.from_template(
     '''General rules:
-''' + not_allowed_executor_string + '''
+''' + not_allowed_function_string + '''

-Here is the description of the task the executor must solve:
+Here is the description of the task the function must solve:
 {task_description}

-Here is the test scenario the executor must pass:
+Here is the test scenario the function must pass:
 {test_description}
 Here are all the files I use:
 {all_files_string}
@@ -279,12 +269,12 @@ To solve this error, you should:
 1. Suggest 3 to 5 possible solutions on how to solve it. You have no access to the documentation of the package.
 2. Decide for the best solution and explain it in detail.
 3. Write down the files that need to be changed, but not files that don't need to be changed.
+Note that any changes needed to make the test pass must be written under the constraint that ''' + IMPLEMENTATION_FILE_NAME + ''' will be used in a different file as well.
 Obey the following rules:
-''' + f'{not_allowed_executor_string}\n{not_allowed_docker_string}' + '''
+''' + f'{not_allowed_function_string}\n{not_allowed_docker_string}\n{gpt_35_turbo_usage_string}' + '''

 Output all the files that need change. You must not change the Dockerfile.
 Don't output files that don't need change. If you output a file, then write the complete file.
-If you change microservice.py and it uses gpt_3_5_turbo_api, then you must keep the code for gpt_3_5_turbo_api in the microservice.py file.
 Use the exact following syntax to wrap the code:

 **...**
@@ -316,6 +306,7 @@ The playground uses the following code to send a request to the microservice:
 ```
 from jina import Client, Document, DocumentArray
 client = Client(host='http://localhost:8080')
+d = Document(text=json.dumps(INPUT_DICTIONARY))  # fill-in dictionary which takes input
 response = client.post('/', inputs=DocumentArray([d]))  # always use '/'
 print(response[0].text)  # can also be blob in case of image/audio..., this should be visualized in the streamlit app
 ```