diff --git a/README.md b/README.md
index ad9e2d6..22730ef 100644
--- a/README.md
+++ b/README.md
@@ -26,6 +26,9 @@ Your imagination is the limit!
 Supported platforms
+
+Downloads
+
 Discord Chat
diff --git a/src/constants.py b/src/constants.py
index 6c05b97..0e0a69d 100644
--- a/src/constants.py
+++ b/src/constants.py
@@ -4,9 +4,7 @@
 REQUIREMENTS_FILE_NAME = 'requirements.txt'
 DOCKER_FILE_NAME = 'Dockerfile'
 CLIENT_FILE_NAME = 'client.py'
 STREAMLIT_FILE_NAME = 'streamlit.py'
-GPT_3_5_TURBO_API_FILE_NAME = 'gpt_3_5_turbo_api.py'
-GPT_3_5_TURBO_API_FILE_TAG = 'python'
 EXECUTOR_FILE_TAG = 'python'
 TEST_EXECUTOR_FILE_TAG = 'python'
 REQUIREMENTS_FILE_TAG = ''
@@ -16,7 +14,6 @@
 STREAMLIT_FILE_TAG = 'python'
 
 FILE_AND_TAG_PAIRS = [
-    (GPT_3_5_TURBO_API_FILE_NAME, GPT_3_5_TURBO_API_FILE_NAME),
     (EXECUTOR_FILE_NAME, EXECUTOR_FILE_TAG),
     (TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG),
     (REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG),
@@ -37,5 +34,3 @@
 PROBLEMATIC_PACKAGES = [
     # 'Pyrender', 'Trimesh', 'ModernGL', 'PyOpenGL', 'Pyglet', 'pythreejs', 'panda3d' # because they need a screen
 ]
-
-GPT_3_5_TURBO_API_FILE_NAME = 'gpt_3_5_turbo_api.py'
\ No newline at end of file
diff --git a/src/options/generate/generator.py b/src/options/generate/generator.py
index b27a893..7b4785c 100644
--- a/src/options/generate/generator.py
+++ b/src/options/generate/generator.py
@@ -6,7 +6,7 @@ from src.apis import gpt
 from src.apis.jina_cloud import process_error_message, push_executor
 from src.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES, MAX_DEBUGGING_ITERATIONS, \
     PROBLEMATIC_PACKAGES, EXECUTOR_FILE_NAME, EXECUTOR_FILE_TAG, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
-    REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, DOCKER_FILE_TAG, GPT_3_5_TURBO_API_FILE_NAME
+    REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, DOCKER_FILE_TAG
 from src.options.generate.templates_user import template_generate_microservice_name, template_generate_possible_packages, \
     template_solve_code_issue, \
     template_solve_dependency_issue, template_is_dependency_issue, template_generate_playground, \
@@ -82,13 +82,10 @@ metas:
         MICROSERVICE_FOLDER_v1 = get_microservice_path(path, microservice_name, packages, num_approach, 1)
         os.makedirs(MICROSERVICE_FOLDER_v1)
 
-        gpt_3_5_turbo_api_content = self.write_gpt_api_file(MICROSERVICE_FOLDER_v1)
-
         microservice_content = self.generate_and_persist_file(
             'Microservice',
             template_generate_executor,
             MICROSERVICE_FOLDER_v1,
-            code_files_wrapped=self.files_to_string({'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content}),
             microservice_name=microservice_name,
             microservice_description=self.task_description,
             test_description=self.test_description,
@@ -102,7 +99,7 @@ metas:
             'Test Microservice',
             template_generate_test,
             MICROSERVICE_FOLDER_v1,
-            code_files_wrapped=self.files_to_string({'microservice.py': microservice_content, 'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,}),
+            code_files_wrapped=self.files_to_string({'microservice.py': microservice_content}),
             microservice_name=microservice_name,
             microservice_description=self.task_description,
             test_description=self.test_description,
@@ -118,7 +115,6 @@ metas:
             code_files_wrapped=self.files_to_string({
                 'microservice.py': microservice_content,
                 'test_microservice.py': test_microservice_content,
-                'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,
             }),
             file_name_purpose=REQUIREMENTS_FILE_NAME,
             file_name=REQUIREMENTS_FILE_NAME,
@@ -133,7 +129,6 @@ metas:
                 'microservice.py': microservice_content,
                 'test_microservice.py': test_microservice_content,
                 'requirements.txt': requirements_content,
-                'gpt_3_5_turbo_api.py': gpt_3_5_turbo_api_content,
             }),
             file_name_purpose=DOCKER_FILE_NAME,
             file_name=DOCKER_FILE_NAME,
@@ -278,10 +273,3 @@ gptdeploy deploy --path {microservice_path}
         error_summary = conversation.chat(template_summarize_error.format(error=error))
         return error_summary
 
-    def write_gpt_api_file(self, MICROSERVICE_FOLDER_v1):
-        cur_dir = os.path.dirname(os.path.abspath(__file__))
-        with open(os.path.join(cur_dir, GPT_3_5_TURBO_API_FILE_NAME), 'r', encoding='utf-8') as file:
-            GPT_3_5_Turbo_API_content = file.read()
-        with open(os.path.join(MICROSERVICE_FOLDER_v1, GPT_3_5_TURBO_API_FILE_NAME), 'w', encoding='utf-8') as file:
-            file.write(GPT_3_5_Turbo_API_content)
-        return GPT_3_5_Turbo_API_content
diff --git a/src/options/generate/gpt_3_5_turbo_api.py b/src/options/generate/gpt_3_5_turbo_api.py
deleted file mode 100644
index 75dad0e..0000000
--- a/src/options/generate/gpt_3_5_turbo_api.py
+++ /dev/null
@@ -1,24 +0,0 @@
-import os
-import openai
-openai.api_key = os.getenv("OPENAI_API_KEY")
-
-class GPT_3_5_Turbo_API:
-    def __init__(self, system: str = ''):
-        self.system = system
-
-    def __call__(self, prompt: str) -> str:
-        response = openai.ChatCompletion.create(
-            model="gpt-3.5-turbo",
-            messages=[{
-                "role": 'system',
-                "content": self.system
-            }, {
-                "role": 'user',
-                "content": prompt
-            }]
-        )
-        return response.choices[0].text
-
-
-
-
diff --git a/src/options/generate/templates_system.py b/src/options/generate/templates_system.py
index ae22c2a..76f88dc 100644
--- a/src/options/generate/templates_system.py
+++ b/src/options/generate/templates_system.py
@@ -8,7 +8,6 @@ The system definition defines the agent the user is talking to.
 The user prompt is precise question and the expected answer format.
 Example:
 # in the executor init:
-from gpt_3_5_turbo_api import GPT_3_5_Turbo_API
 gpt = GPT_3_5_Turbo_API(
     system=\'\'\'
 You are a tv-reporter who is specialized in C-list celebrities.
diff --git a/src/options/generate/templates_user.py b/src/options/generate/templates_user.py
index b5a3b10..95277eb 100644
--- a/src/options/generate/templates_user.py
+++ b/src/options/generate/templates_user.py
@@ -90,8 +90,6 @@ You must provide the complete file with the exact same syntax to wrap the code.'''
 
 template_generate_executor = PromptTemplate.from_template(
     general_guidelines_string + '''
-{code_files_wrapped}
-
 Write the executor called '{microservice_name}'. The name is very important to keep.
 It matches the following description: '{microservice_description}'.
 It will be tested with the following scenario: '{test_description}'.
@@ -103,9 +101,32 @@ Have in mind that d.uri is never a path to a local file. It is always a url.
 
 Your approach:
 1. Identify the core challenge when implementing the executor.
-2. Think about solutions for these challenges including the usage of gpt via "from gpt_3_5_turbo_api import GPT_3_5_Turbo_API"
+2. Think about solutions for these challenges including the usage of gpt via "gpt_3_5_turbo_api"
 3. Decide for one of the solutions.
 4. Write the code for the executor. Don't write code for the test.
 
+If you decided to use gpt, then the executor must include the following code:
+import os
+import openai
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+class GPT_3_5_Turbo_API:
+    def __init__(self, system: str = ''):
+        self.system = system
+
+    def __call__(self, prompt: str) -> str:
+        response = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[{{
+                "role": 'system',
+                "content": self.system
+            }}, {{
+                "role": 'user',
+                "content": prompt
+            }}]
+        )
+        return response.choices[0]['message']['content']
+
+
 ''' + '\n' + template_code_wrapping_string
 )
@@ -158,8 +179,7 @@ It is important to make sure that all libs are installed that are required by the python packages.
 Usually libraries are installed with apt-get.
 Be aware that the machine the docker container is running on does not have a GPU - only CPU.
 Add the config.yml file to the Dockerfile.
-Add the gpt_3_5_turbo_api.py file to the Dockerfile.
-Note that the Dockerfile only has access to the files: microservice.py, requirements.txt, config.yml, test_microservice.py and gpt_3_5_turbo_api.py.
+Note that the Dockerfile only has access to the files: microservice.py, requirements.txt, config.yml and test_microservice.py.
 The base image of the Dockerfile is FROM jinaai/jina:3.14.1-py39-standard.
 The entrypoint is ENTRYPOINT ["jina", "executor", "--uses", "config.yml"].
 Make sure the all files are in the /workdir.
@@ -275,7 +295,7 @@ template_generate_playground = PromptTemplate.from_template(
 Create a playground for the executor {microservice_name} using streamlit.
 The playground must look like it was made by a professional designer.
 All the ui elements are well thought out to make them visually appealing and easy to use.
-The playground contains emojis that fit the theme of the playground.
+The playground contains many emojis that fit the theme of the playground and has an emoji as favicon.
 This is an example how you can connect to the executor assuming the document (d) is already defined:
 ```
 from jina import Client, Document, DocumentArray
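For reference, below is a minimal, self-contained sketch of the GPT_3_5_Turbo_API wrapper that this diff inlines into template_generate_executor in place of the deleted src/options/generate/gpt_3_5_turbo_api.py. The diff also fixes a bug in passing: the deleted file read response.choices[0].text, a Completion-API field, while the inlined version correctly reads choices[0]['message']['content']. The sketch assumes the pre-1.0 openai package and an OPENAI_API_KEY environment variable; the system prompt and question at the bottom are illustrative only, not part of the change.

```python
import os

import openai

# The wrapper expects the API key to be present in the environment.
openai.api_key = os.getenv("OPENAI_API_KEY")


class GPT_3_5_Turbo_API:
    def __init__(self, system: str = ''):
        # System prompt that frames every call to the model.
        self.system = system

    def __call__(self, prompt: str) -> str:
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[
                {"role": "system", "content": self.system},
                {"role": "user", "content": prompt},
            ],
        )
        # Chat completions return text under message.content;
        # choices[0].text only exists on the legacy Completion API.
        return response.choices[0]['message']['content']


# Illustrative usage with a hypothetical system prompt and question:
gpt = GPT_3_5_Turbo_API(system='You are a concise assistant.')
print(gpt('What does a Jina Executor do? Answer in one sentence.'))
```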