Mirror of https://github.com/aljazceru/dev-gpt.git, synced 2025-12-23 16:44:20 +01:00
feat: support for gpt
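This commit reworks how generated microservices get GPT access: instead of shipping gpt_3_5_turbo_api.py as a separate helper file that was copied next to every generated microservice (and referenced from the constants, the generator, and the Dockerfile instructions), the GPT_3_5_Turbo_API class is now inlined into the executor-generation prompt itself. Along the way the response lookup is corrected from choices[0].text to choices[0]['message']['content'], a PyPI downloads badge is added to the README, and the playground prompt now asks for more emojis plus an emoji favicon.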
README.md:

@@ -26,6 +26,9 @@ Your imagination is the limit!
 <a href="https://github.com/tiangolo/gptdeploy/actions?query=workflow%3ATest+event%3Apush+branch%3Amaster" target="_blank">
     <img src="https://img.shields.io/badge/platform-mac%20%7C%20linux%20%7C%20windows-blue" alt="Supported platforms">
 </a>
+<a href="https://pypi.org/project/gptdeploy" target="_blank">
+    <img src="https://img.shields.io/pypi/dm/gptdeploy?color=%2334D058&label=pypi%20downloads" alt="Downloads">
+</a>
 <a href="https://discord.gg/ESn8ED6Fyn" target="_blank">
     <img src="https://img.shields.io/badge/chat_on-Discord-7289DA?logo=discord&logoColor=white" alt="Discord Chat">
 </a>
src/constants.py:

@@ -4,9 +4,7 @@ REQUIREMENTS_FILE_NAME = 'requirements.txt'
 DOCKER_FILE_NAME = 'Dockerfile'
 CLIENT_FILE_NAME = 'client.py'
 STREAMLIT_FILE_NAME = 'streamlit.py'
-GPT_3_5_TURBO_API_FILE_NAME = 'gpt_3_5_turbo_api.py'
 
-GPT_3_5_TURBO_API_FILE_TAG = 'python'
 EXECUTOR_FILE_TAG = 'python'
 TEST_EXECUTOR_FILE_TAG = 'python'
 REQUIREMENTS_FILE_TAG = ''
@@ -16,7 +14,6 @@ STREAMLIT_FILE_TAG = 'python'
 
 
 FILE_AND_TAG_PAIRS = [
-    (GPT_3_5_TURBO_API_FILE_NAME, GPT_3_5_TURBO_API_FILE_NAME),
     (EXECUTOR_FILE_NAME, EXECUTOR_FILE_TAG),
     (TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG),
     (REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG),
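Review note: the removed pair above used GPT_3_5_TURBO_API_FILE_NAME for both tuple elements, whereas every sibling entry pairs a *_FILE_NAME with its *_FILE_TAG; the deletion also disposes of that pre-existing slip.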
@@ -37,5 +34,3 @@ PROBLEMATIC_PACKAGES = [
     # 'Pyrender', 'Trimesh',
     'ModernGL', 'PyOpenGL', 'Pyglet', 'pythreejs', 'panda3d' # because they need a screen
 ]
-
-GPT_3_5_TURBO_API_FILE_NAME = 'gpt_3_5_turbo_api.py'
Generator module:

@@ -6,7 +6,7 @@ from src.apis import gpt
 from src.apis.jina_cloud import process_error_message, push_executor
 from src.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES, MAX_DEBUGGING_ITERATIONS, \
     PROBLEMATIC_PACKAGES, EXECUTOR_FILE_NAME, EXECUTOR_FILE_TAG, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
-    REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, DOCKER_FILE_TAG, GPT_3_5_TURBO_API_FILE_NAME
+    REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, DOCKER_FILE_TAG
 from src.options.generate.templates_user import template_generate_microservice_name, template_generate_possible_packages, \
     template_solve_code_issue, \
     template_solve_dependency_issue, template_is_dependency_issue, template_generate_playground, \
@@ -82,13 +82,10 @@ metas:
         MICROSERVICE_FOLDER_v1 = get_microservice_path(path, microservice_name, packages, num_approach, 1)
         os.makedirs(MICROSERVICE_FOLDER_v1)
 
-        gpt_3_5_turbo_api_content = self.write_gpt_api_file(MICROSERVICE_FOLDER_v1)
-
         microservice_content = self.generate_and_persist_file(
             'Microservice',
             template_generate_executor,
             MICROSERVICE_FOLDER_v1,
-            code_files_wrapped=self.files_to_string({'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content}),
             microservice_name=microservice_name,
             microservice_description=self.task_description,
             test_description=self.test_description,
@@ -102,7 +99,7 @@ metas:
             'Test Microservice',
             template_generate_test,
             MICROSERVICE_FOLDER_v1,
-            code_files_wrapped=self.files_to_string({'microservice.py': microservice_content, 'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,}),
+            code_files_wrapped=self.files_to_string({'microservice.py': microservice_content}),
             microservice_name=microservice_name,
             microservice_description=self.task_description,
             test_description=self.test_description,
@@ -118,7 +115,6 @@ metas:
             code_files_wrapped=self.files_to_string({
                 'microservice.py': microservice_content,
                 'test_microservice.py': test_microservice_content,
-                'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,
             }),
             file_name_purpose=REQUIREMENTS_FILE_NAME,
             file_name=REQUIREMENTS_FILE_NAME,
@@ -133,7 +129,6 @@ metas:
                 'microservice.py': microservice_content,
                 'test_microservice.py': test_microservice_content,
                 'requirements.txt': requirements_content,
-                'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,
             }),
             file_name_purpose=DOCKER_FILE_NAME,
             file_name=DOCKER_FILE_NAME,
@@ -278,10 +273,3 @@ gptdeploy deploy --path {microservice_path}
         error_summary = conversation.chat(template_summarize_error.format(error=error))
         return error_summary
 
-    def write_gpt_api_file(self, MICROSERVICE_FOLDER_v1):
-        cur_dir = os.path.dirname(os.path.abspath(__file__))
-        with open(os.path.join(cur_dir, GPT_3_5_TURBO_API_FILE_NAME), 'r', encoding='utf-8') as file:
-            GPT_3_5_Turbo_API_content = file.read()
-        with open(os.path.join(MICROSERVICE_FOLDER_v1, GPT_3_5_TURBO_API_FILE_NAME), 'w', encoding='utf-8') as file:
-            file.write(GPT_3_5_Turbo_API_content)
-        return GPT_3_5_Turbo_API_content
gpt_3_5_turbo_api.py (file deleted):

@@ -1,24 +0,0 @@
-import os
-import openai
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-class GPT_3_5_Turbo_API:
-    def __init__(self, system: str = ''):
-        self.system = system
-
-    def __call__(self, prompt: str) -> str:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=[{
-                "role": 'system',
-                "content": self.system
-            }, {
-                "role": 'user',
-                "content": prompt
-            }]
-        )
-        return response.choices[0].text
-
-
-
-
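Aside: the deleted file's return response.choices[0].text reads a field that only exists on the legacy Completion endpoint; with ChatCompletion (pre-1.0 openai client, as used throughout this repo) the reply lives under choices[0]['message']['content'], which is what the inlined replacement in the template hunk below uses. A minimal sketch of the corrected call, assuming openai<1.0 and OPENAI_API_KEY set:

```python
import os

import openai  # pre-1.0 openai client, matching this repo

openai.api_key = os.getenv("OPENAI_API_KEY")

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say hello."}],
)
# Chat replies live under message.content; .text would raise an AttributeError here.
print(response.choices[0]["message"]["content"])
```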
src/options/generate/templates_user.py:

@@ -8,7 +8,6 @@ The system definition defines the agent the user is talking to.
 The user prompt is precise question and the expected answer format.
 Example:
 # in the executor init:
-from gpt_3_5_turbo_api import GPT_3_5_Turbo_API
 gpt = GPT_3_5_Turbo_API(
     system=\'\'\'
 You are a tv-reporter who is specialized in C-list celebrities.
@@ -90,8 +90,6 @@ You must provide the complete file with the exact same syntax to wrap the code.'
 template_generate_executor = PromptTemplate.from_template(
     general_guidelines_string + '''
 
-{code_files_wrapped}
-
 Write the executor called '{microservice_name}'. The name is very important to keep.
 It matches the following description: '{microservice_description}'.
 It will be tested with the following scenario: '{test_description}'.
@@ -103,9 +101,32 @@ Have in mind that d.uri is never a path to a local file. It is always a url.
 
 Your approach:
 1. Identify the core challenge when implementing the executor.
-2. Think about solutions for these challenges including the usage of gpt via "from gpt_3_5_turbo_api import GPT_3_5_Turbo_API"
+2. Think about solutions for these challenges including the usage of gpt via "gpt_3_5_turbo_api"
 3. Decide for one of the solutions.
 4. Write the code for the executor. Don't write code for the test.
+If you decided to use gpt, then the executor must include the following code:
+import os
+import openai
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+class GPT_3_5_Turbo_API:
+    def __init__(self, system: str = ''):
+        self.system = system
+
+    def __call__(self, prompt: str) -> str:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[{{
+                "role": 'system',
+                "content": self.system
+            }}, {{
+                "role": 'user',
+                "content": prompt
+            }}]
+        )
+        return response.choices[0]['message']['content']
+
+
 ''' + '\n' + template_code_wrapping_string
 )
 
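The doubled braces in the block added above are deliberate: template_generate_executor is built with langchain's PromptTemplate, which treats single braces as input variables, so {{ and }} are escapes that render as literal braces in the final prompt while placeholders like {microservice_name} remain substitutable. A small sketch of that behavior (the import path may vary across langchain versions):

```python
from langchain.prompts import PromptTemplate

# Single braces mark variables; doubled braces come out as literal braces.
template = PromptTemplate.from_template('messages=[{{"role": "user"}}] for {name}')
print(template.format(name="gpt"))
# -> messages=[{"role": "user"}] for gpt
```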
@@ -158,8 +179,7 @@ It is important to make sure that all libs are installed that are required by th
 Usually libraries are installed with apt-get.
 Be aware that the machine the docker container is running on does not have a GPU - only CPU.
 Add the config.yml file to the Dockerfile.
-Add the gpt_3_5_turbo_api.py file to the Dockerfile.
-Note that the Dockerfile only has access to the files: microservice.py, requirements.txt, config.yml, test_microservice.py and gpt_3_5_turbo_api.py.
+Note that the Dockerfile only has access to the files: microservice.py, requirements.txt, config.yml and test_microservice.py.
 The base image of the Dockerfile is FROM jinaai/jina:3.14.1-py39-standard.
 The entrypoint is ENTRYPOINT ["jina", "executor", "--uses", "config.yml"].
 Make sure the all files are in the /workdir.
@@ -275,7 +295,7 @@ template_generate_playground = PromptTemplate.from_template(
 Create a playground for the executor {microservice_name} using streamlit.
 The playground must look like it was made by a professional designer.
 All the ui elements are well thought out to make them visually appealing and easy to use.
-The playground contains emojis that fit the theme of the playground.
+The playground contains many emojis that fit the theme of the playground and has an emoji as favicon.
 This is an example how you can connect to the executor assuming the document (d) is already defined:
 ```
 from jina import Client, Document, DocumentArray
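The playground template's connection example continues past the import line shown above. For orientation, a hedged sketch of what a Jina 3.x client call of this shape typically looks like — the host and endpoint below are illustrative assumptions, not values from this diff:

```python
from jina import Client, Document, DocumentArray

d = Document(text="hello")                               # the 'already defined' document
client = Client(host="grpc://localhost:8080")            # assumed deployment address
response = client.post("/", inputs=DocumentArray([d]))   # assumed default endpoint
print(response[0].text)
```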