mirror of https://github.com/aljazceru/dev-gpt.git (synced 2026-01-04 06:14:23 +01:00)
feat: support for gpt
@@ -6,7 +6,7 @@ from src.apis import gpt
 from src.apis.jina_cloud import process_error_message, push_executor
 from src.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES, MAX_DEBUGGING_ITERATIONS, \
     PROBLEMATIC_PACKAGES, EXECUTOR_FILE_NAME, EXECUTOR_FILE_TAG, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
-    REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, DOCKER_FILE_TAG
+    REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, DOCKER_FILE_TAG, GPT_3_5_TURBO_API_FILE_NAME
 from src.options.generate.templates_user import template_generate_microservice_name, template_generate_possible_packages, \
     template_solve_code_issue, \
     template_solve_dependency_issue, template_is_dependency_issue, template_generate_playground, \
@@ -70,7 +70,7 @@ metas:
             content_raw, file_name, match_single_block=True
         )
         persist_file(content, os.path.join(destination_folder, file_name))
-        return content_raw
+        return content

    def generate_microservice(
            self,
@@ -82,10 +82,13 @@ metas:
         MICROSERVICE_FOLDER_v1 = get_microservice_path(path, microservice_name, packages, num_approach, 1)
         os.makedirs(MICROSERVICE_FOLDER_v1)

+        gpt_3_5_turbo_api_content = self.write_gpt_api_file(MICROSERVICE_FOLDER_v1)
+
         microservice_content = self.generate_and_persist_file(
             'Microservice',
             template_generate_executor,
             MICROSERVICE_FOLDER_v1,
+            code_files_wrapped=self.files_to_string({'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content}),
             microservice_name=microservice_name,
             microservice_description=self.task_description,
             test_description=self.test_description,
@@ -99,7 +102,7 @@ metas:
             'Test Microservice',
             template_generate_test,
             MICROSERVICE_FOLDER_v1,
-            code_files_wrapped=self.files_to_string({'microservice.py': microservice_content}),
+            code_files_wrapped=self.files_to_string({'microservice.py': microservice_content, 'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content}),
             microservice_name=microservice_name,
             microservice_description=self.task_description,
             test_description=self.test_description,
@@ -114,7 +117,8 @@ metas:
             MICROSERVICE_FOLDER_v1,
             code_files_wrapped=self.files_to_string({
                 'microservice.py': microservice_content,
-                'test_microservice.py': test_microservice_content
+                'test_microservice.py': test_microservice_content,
+                'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,
             }),
             file_name_purpose=REQUIREMENTS_FILE_NAME,
             file_name=REQUIREMENTS_FILE_NAME,
@@ -128,7 +132,8 @@ metas:
             code_files_wrapped=self.files_to_string({
                 'microservice.py': microservice_content,
                 'test_microservice.py': test_microservice_content,
-                'requirements.txt': requirements_content
+                'requirements.txt': requirements_content,
+                'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,
             }),
             file_name_purpose=DOCKER_FILE_NAME,
             file_name=DOCKER_FILE_NAME,
@@ -136,6 +141,7 @@ metas:
         )

         self.write_config_yml(microservice_name, MICROSERVICE_FOLDER_v1)
+
        print('\nFirst version of the microservice generated. Start iterating on it to make the tests pass...')

    def generate_playground(self, microservice_name, microservice_path):
@@ -271,3 +277,11 @@ gptdeploy deploy --path {microservice_path}
         conversation = self.gpt_session.get_conversation([])
         error_summary = conversation.chat(template_summarize_error.format(error=error))
         return error_summary
+
+    def write_gpt_api_file(self, MICROSERVICE_FOLDER_v1):
+        cur_dir = os.path.dirname(os.path.abspath(__file__))
+        with open(os.path.join(cur_dir, GPT_3_5_TURBO_API_FILE_NAME), 'r', encoding='utf-8') as file:
+            GPT_3_5_Turbo_API_content = file.read()
+        with open(os.path.join(MICROSERVICE_FOLDER_v1, GPT_3_5_TURBO_API_FILE_NAME), 'w', encoding='utf-8') as file:
+            file.write(GPT_3_5_Turbo_API_content)
+        return GPT_3_5_Turbo_API_content
src/options/generate/gpt_3_5_turbo_api.py (new file, +24 lines)
@@ -0,0 +1,24 @@
+import os
+
+import openai
+
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+
+class GPT_3_5_Turbo_API:
+    def __init__(self, system: str = ''):
+        self.system = system
+
+    def __call__(self, prompt: str) -> str:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[{
+                "role": 'system',
+                "content": self.system
+            }, {
+                "role": 'user',
+                "content": prompt
+            }]
+        )
+        # chat completions return the text under message.content, not choices[0].text
+        return response.choices[0].message.content
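For context, a minimal usage sketch of this client (not part of the commit; assumes OPENAI_API_KEY is exported and gpt_3_5_turbo_api.py is on the path):

from gpt_3_5_turbo_api import GPT_3_5_Turbo_API

# hypothetical smoke test; both the system text and the prompt are illustrative
gpt = GPT_3_5_Turbo_API(system='You answer every question with a single word.')
print(gpt('What is the capital of France?'))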
@@ -1,6 +1,27 @@
 from src.constants import FLOW_URL_PLACEHOLDER
 from src.options.generate.templates_user import not_allowed_docker_string, not_allowed_executor_string

+gpt_example = '''
+# gpt_3_5_turbo is a language model that can be used to generate text.
+You can use it to generate text given a system definition and a user prompt.
+The system definition defines the agent the user is talking to.
+The user prompt is a precise question and the expected answer format.
+Example:
+# in the executor init:
+from gpt_3_5_turbo_api import GPT_3_5_Turbo_API
+gpt = GPT_3_5_Turbo_API(
+    system=\'\'\'
+You are a tv-reporter who is specialized in C-list celebrities.
+When you get asked something like 'Who was having a date with <X>?', then you answer with a json like '{"dates": ["<Y>", "<Z>"]}'.
+You must not answer something else - only the json.
+\'\'\')
+
+# in the executor endpoint function:
+response_string = gpt(prompt)
+response = json.loads(response_string)  # response_string is a JSON string; response is a dict
+'''
+
 executor_example = '''Using the Jina framework, users can define executors.
 Here is an example of how an executor can be defined. It always starts with a comment:
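The snippet inside gpt_example is fragmentary (prompt is undefined and json is never imported); a self-contained version of the same pattern, with a hypothetical input question, would be:

import json

from gpt_3_5_turbo_api import GPT_3_5_Turbo_API

# the system definition pins the agent's role and answer format
gpt = GPT_3_5_Turbo_API(system='''
You are a tv-reporter who is specialized in C-list celebrities.
When you get asked 'Who was having a date with <X>?', you answer with a json like '{"dates": ["<Y>", "<Z>"]}'.
You must not answer something else - only the json.
''')

prompt = 'Who was having a date with Example Celebrity?'  # illustrative input
response_string = gpt(prompt)            # raw model output: a JSON string
response = json.loads(response_string)   # parsed into a dict
print(response['dates'])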
@@ -25,7 +25,9 @@ The executor and the test must not use a pre-trained model unless it is explicit
 The executor and the test must not train a model.
 The executor and the test must not use any attribute of Document except Document.text.
 The executor and the test must not contain prototype or placeholder implementations.
-The executor and the test must run in a docker container based on debian.'''
+The executor and the test must run in a docker container based on debian.
+The executor and the test must use gpt_3_5_turbo_api if the task requires a language model. Other language models are not allowed.'''


 template_generate_microservice_name = PromptTemplate.from_template(
@@ -88,6 +90,8 @@ You must provide the complete file with the exact same syntax to wrap the code.'
 template_generate_executor = PromptTemplate.from_template(
     general_guidelines_string + '''

+{code_files_wrapped}
+
 Write the executor called '{microservice_name}'. The name is very important to keep.
 It matches the following description: '{microservice_description}'.
 It will be tested with the following scenario: '{test_description}'.

@@ -99,7 +103,7 @@ Have in mind that d.uri is never a path to a local file. It is always a url.

 Your approach:
 1. Identify the core challenge when implementing the executor.
-2. Think about solutions for these challenges.
+2. Think about solutions for these challenges, including the usage of gpt via "from gpt_3_5_turbo_api import GPT_3_5_Turbo_API".
 3. Decide on one of the solutions.
 4. Write the code for the executor. Don't write code for the test.
 ''' + '\n' + template_code_wrapping_string
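To make the intended pattern concrete, here is a minimal sketch of an executor a generation run could produce; the class name, endpoint name, and system text are hypothetical, and the copied gpt_3_5_turbo_api.py is assumed to sit next to microservice.py:

from docarray import DocumentArray
from jina import Executor, requests

from gpt_3_5_turbo_api import GPT_3_5_Turbo_API


class CelebrityDates(Executor):  # hypothetical microservice name
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # the system definition fixes the agent's role and answer format
        self.gpt = GPT_3_5_Turbo_API(system='Answer with a json like \'{"dates": [...]}\' - nothing else.')

    @requests
    def answer(self, docs: DocumentArray, **kwargs):
        for d in docs:
            # per the guidelines, only Document.text is read and written
            d.text = self.gpt(d.text)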
@@ -132,7 +136,13 @@ template_generate_requirements = PromptTemplate.from_template(

 {code_files_wrapped}

-Write the content of the requirements.txt file. Make sure to include pytest. Make sure that jina==3.14.1. Make sure that docarray==0.21.0.
+Write the content of the requirements.txt file.
+Make sure to include pytest.
+Make sure to include openai>=0.26.0.
+Make sure that jina==3.14.1.
+Make sure that docarray==0.21.0.
+You must not add gpt_3_5_turbo_api to the requirements.txt file.

 All versions are fixed using ~=, ==, <, >, <=, >=. The package versions must not have conflicts.
 ''' + '\n' + template_code_wrapping_string
 )
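A requirements.txt that satisfies these instructions might look like the following; the pytest and openai pins are illustrative, since the template only demands that every version is fixed with one of the listed operators:

pytest~=7.2.0
openai~=0.27.0
jina==3.14.1
docarray==0.21.0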
@@ -148,7 +158,8 @@ It is important to make sure that all libs are installed that are required by th
 Usually libraries are installed with apt-get.
 Be aware that the machine the docker container is running on does not have a GPU - only CPU.
 Add the config.yml file to the Dockerfile.
-Note that the Dockerfile only has access to the files: microservice.py, requirements.txt, config.yml, test_microservice.py.
+Add the gpt_3_5_turbo_api.py file to the Dockerfile.
+Note that the Dockerfile only has access to the files: microservice.py, requirements.txt, config.yml, test_microservice.py and gpt_3_5_turbo_api.py.
 The base image of the Dockerfile is FROM jinaai/jina:3.14.1-py39-standard.
 The entrypoint is ENTRYPOINT ["jina", "executor", "--uses", "config.yml"].
 Make sure that all files are in the /workdir.
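For illustration, a Dockerfile that follows these instructions could look like this; the apt-get line is a placeholder for whatever system libraries the generated requirements actually need:

FROM jinaai/jina:3.14.1-py39-standard

# install system libraries required by the python packages (placeholder package)
RUN apt-get update && apt-get install -y --no-install-recommends libgomp1 \
    && rm -rf /var/lib/apt/lists/*

WORKDIR /workdir
COPY requirements.txt config.yml microservice.py test_microservice.py gpt_3_5_turbo_api.py /workdir/
RUN pip install --no-cache-dir -r requirements.txt

ENTRYPOINT ["jina", "executor", "--uses", "config.yml"]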