mirror of https://github.com/aljazceru/dev-gpt.git
synced 2025-12-20 07:04:20 +01:00

fix: remove jina version

15 README.md
@@ -136,6 +136,12 @@ gptdeploy create --description "Given a word, return a list of rhyming words usi
 ```
 <img src="res/rhyme_generator_example.png" alt="Rhyme Generator" width="600" />
 
+### Word Cloud Generator
+```bash
+gptdeploy create --description "Generate a word cloud from a given text" --test "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
+```
+<img src="res/word_cloud_example.png" alt="Word Cloud Generator" width="600" />
+
 ### 3d model info
 ```bash
 gptdeploy create --description "Given a 3d object, return vertex count and face count" --test "https://raw.githubusercontent.com/polygonjs/polygonjs-assets/master/models/wolf.obj"
@@ -336,13 +342,6 @@ gptdeploy create --description "Generate QR code from URL" --test "https://www.e
 [//]: # (```)
 
 [//]: # ()
-[//]: # (### Word Cloud Generator)
-
-[//]: # (```bash)
-
-[//]: # (gptdeploy create --description "Generate a word cloud from a given text" --test "Lorem ipsum dolor sit amet, consectetur adipiscing elit.")
-
-[//]: # (```)
 
 [//]: # ()
 [//]: # (### Mandelbrot Set Visualizer)
@@ -426,3 +425,5 @@ Make sure it is only printed twice in case it changed.
 - [ ] use gptdeploy list to show all deployments
 - [ ] gptdeploy delete to delete a deployment
 - [ ] gptdeploy update to update a deployment
+- [ ] if the user runs gptdeploy without any arguments, show the help message
+- [ ] start streamlit playground automatically after the deployment
BIN res/word_cloud_example.png (new file)
Binary file not shown. After: 592 KiB
2 setup.py

@@ -7,7 +7,7 @@ def read_requirements():
 
 setup(
     name='gptdeploy',
-    version='0.18.9',
+    version='0.18.10',
     description='Use natural language interface to create, deploy and update your microservice infrastructure.',
     long_description=open('README.md', 'r', encoding='utf-8').read(),
     long_description_content_type='text/markdown',
(next file, name not shown in this extract)

@@ -1,2 +1,2 @@
-__version__ = '0.18.9'
+__version__ = '0.18.10'
 from src.cli import main
(next file, name not shown in this extract)

@@ -1,17 +1,15 @@
+import os
 import random
+import re
 
 from src import gpt, jina_cloud
+from src.constants import FILE_AND_TAG_PAIRS
 from src.jina_cloud import push_executor, process_error_message
 from src.prompt_tasks import general_guidelines, executor_file_task, chain_of_thought_creation, test_executor_file_task, \
     chain_of_thought_optimization, requirements_file_task, docker_file_task, not_allowed
 from src.utils.io import recreate_folder, persist_file
 from src.utils.string_tools import print_colored
 
-import os
-import re
-
-from src.constants import FILE_AND_TAG_PAIRS
-
 
 class ExecutorFactory:
     def __init__(self):
@@ -72,7 +70,6 @@ class ExecutorFactory:
     ):
         EXECUTOR_FOLDER_v1 = self.get_executor_path(output_path, package, 1)
         recreate_folder(EXECUTOR_FOLDER_v1)
-        recreate_folder('../flow')
 
         print_colored('', '############# Executor #############', 'red')
         user_query = (
@@ -107,6 +104,7 @@ class ExecutorFactory:
         persist_file(test_executor_content, os.path.join(EXECUTOR_FOLDER_v1, 'test_executor.py'))
 
         print_colored('', '############# Requirements #############', 'red')
+        requirements_path = os.path.join(EXECUTOR_FOLDER_v1, 'requirements.txt')
         user_query = (
                 general_guidelines()
                 + self.wrap_content_in_code_block(executor_content, 'executor.py', 'python')
@@ -117,17 +115,17 @@ class ExecutorFactory:
         requirements_content_raw = conversation.query(user_query)
         if is_chain_of_thought:
             requirements_content_raw = conversation.query(
-                chain_of_thought_optimization('', '../requirements.txt') + "Keep the same version of jina ")
+                chain_of_thought_optimization('', requirements_path) + "Keep the same version of jina ")
 
-        requirements_content = self.extract_content_from_result(requirements_content_raw, '../requirements.txt')
-        persist_file(requirements_content, os.path.join(EXECUTOR_FOLDER_v1, '../requirements.txt'))
+        requirements_content = self.extract_content_from_result(requirements_content_raw, 'requirements.txt')
+        persist_file(requirements_content, requirements_path)
 
         print_colored('', '############# Dockerfile #############', 'red')
         user_query = (
                 general_guidelines()
                 + self.wrap_content_in_code_block(executor_content, 'executor.py', 'python')
                 + self.wrap_content_in_code_block(test_executor_content, 'test_executor.py', 'python')
-                + self.wrap_content_in_code_block(requirements_content, '../requirements.txt', '')
+                + self.wrap_content_in_code_block(requirements_content, 'requirements.txt', '')
                 + docker_file_task()
         )
         conversation = self.gpt_session.get_conversation()
@@ -139,6 +137,7 @@ class ExecutorFactory:
         persist_file(dockerfile_content, os.path.join(EXECUTOR_FOLDER_v1, 'Dockerfile'))
 
         self.write_config_yml(executor_name, EXECUTOR_FOLDER_v1)
+        print('First version of the executor created. Start iterating on it to make the tests pass...')
 
     def create_playground(self, executor_name, executor_path, host):
         print_colored('', '############# Playground #############', 'red')
@@ -176,6 +175,7 @@ print(response[0].text) # can also be blob in case of image/audio..., this shoul
         MAX_DEBUGGING_ITERATIONS = 10
         error_before = ''
         for i in range(1, MAX_DEBUGGING_ITERATIONS):
+            print('Debugging iteration', i)
             previous_executor_path = self.get_executor_path(output_path, package, i)
             next_executor_path = self.get_executor_path(output_path, package, i + 1)
             log_hubble = push_executor(previous_executor_path)
@@ -279,31 +279,26 @@ package2,package3,...
         packages = packages[:threads]
         return packages
 
 
     def create(self, description, num_approaches, output_path, test):
         generated_name = self.generate_executor_name(description)
         executor_name = f'{generated_name}{random.randint(0, 1000_000)}'
         packages_list = self.get_possible_packages(description, num_approaches)
         recreate_folder(output_path)
-        # packages_list = [['a']]
-        # executor_name = 'ColorPaletteGeneratorExecutor5946'
-        # executor_path = '/Users/florianhonicke/jina/gptdeploy/executor/colorsys_colorharmony/v5'
-        # host = 'grpcs://gptdeploy-5f6ea44fc8.wolf.jina.ai'
         for packages in packages_list:
             try:
                 self.create_executor(description, test, output_path, executor_name, packages)
                 executor_path = self.debug_executor(output_path, packages, description, test)
-                print('Deploy a jina flow')
                 host = jina_cloud.deploy_flow(executor_name, executor_path)
-                print(f'Flow is deployed create the playground for {host}')
                 self.create_playground(executor_name, executor_path, host)
             except self.MaxDebugTimeReachedException:
                 print('Could not debug the executor.')
                 continue
-            print(
-                'Executor name:', executor_name, '\n',
-                'Executor path:', executor_path, '\n',
-                'Host:', host, '\n',
-                'Playground:', f'streamlit run {os.path.join(executor_path, "app.py")}', '\n',
+            print(f'''
+Executor name: {executor_name}
+Executor path: {executor_path}
+Host: {host}
+Playground: streamlit run {os.path.join(executor_path, "app.py")}
+'''
             )
             break
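The summary block is switched from comma-separated print arguments to a single f-string, presumably because print() joins every argument (including the '\n' items) with a space, which leaves stray leading spaces on each line. A minimal, runnable sketch of the difference, using hypothetical values not taken from this diff:

```python
# Hypothetical values, for illustration only.
executor_name = 'RhymeGenerator42'
host = 'grpcs://example.wolf.jina.ai'

# Old style: each argument, including '\n', is joined with a space,
# so the following lines start with a stray blank.
print('Executor name:', executor_name, '\n', 'Host:', host, '\n')

# New style: one f-string, clean line breaks.
print(f'''
Executor name: {executor_name}
Host: {host}
''')
```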
(next file, name not shown in this extract)

@@ -54,9 +54,10 @@ class GPTSession:
         print('\n')
         money_prompt = round(self.chars_prompt_so_far / 3.4 * self.pricing_prompt / 1000, 2)
         money_generation = round(self.chars_generation_so_far / 3.4 * self.pricing_generation / 1000, 2)
-        print('money prompt:', f'${money_prompt}')
-        print('money generation:', f'${money_generation}')
-        print('total money:', f'${money_prompt + money_generation}')
+        print('Estimated costs on openai.com:')
+        # print('money prompt:', f'${money_prompt}')
+        # print('money generation:', f'${money_generation}')
+        print('total money so far:', f'${money_prompt + money_generation}')
         print('\n')
 
     def get_conversation(self):
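The estimate above appears to treat characters / 3.4 as an approximation of the token count and then applies the per-1K-token price. A minimal sketch of the same arithmetic; the character counts and prices below are illustrative assumptions, not values taken from this diff:

```python
# Rough OpenAI cost estimate as computed in GPTSession above:
# tokens ~= characters / 3.4, and the price is quoted per 1K tokens.
chars_prompt_so_far = 42_000      # assumed prompt characters so far
chars_generation_so_far = 8_500   # assumed generated characters so far
pricing_prompt = 0.03             # assumed $/1K prompt tokens
pricing_generation = 0.06         # assumed $/1K generated tokens

money_prompt = round(chars_prompt_so_far / 3.4 * pricing_prompt / 1000, 2)
money_generation = round(chars_generation_so_far / 3.4 * pricing_generation / 1000, 2)
print('total money so far:', f'${money_prompt + money_generation}')  # -> $0.52
```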
(next file, name not shown in this extract)

@@ -80,6 +80,7 @@ def deploy_on_jcloud(flow_yaml):
 
 
 def deploy_flow(executor_name, dest_folder):
+    print('Deploy a jina flow')
     flow = f'''
 jtype: Flow
 with:

@@ -106,12 +107,9 @@ executors:
     with open(full_flow_path, 'w') as f:
         f.write(flow)
 
-    # print('try local execution')
-    # flow = Flow.load_config(full_flow_path)
-    # with flow:
-    #     pass
-    print('deploy flow on jcloud')
-    return deploy_on_jcloud(flow_yaml=full_flow_path)
+    host = deploy_on_jcloud(flow_yaml=full_flow_path)
+    print(f'Flow is deployed create the playground for {host}')
+    return host
 
 
 def replace_client_line(file_content: str, replacement: str) -> str:
(next file, name not shown in this extract)

@@ -93,7 +93,7 @@ def set_api_key(key):
     if system_platform == "windows":
         set_env_variable_command = f'setx OPENAI_API_KEY "{key}"'
         subprocess.call(set_env_variable_command, shell=True)
-        click.echo("OPENAI_API_KEY has been set.")
+        click.echo("✅ Success, OPENAI_API_KEY has been set.\nPlease restart your Command Prompt to apply the changes.")
     elif system_platform in ["linux", "darwin"]:
         if "OPENAI_API_KEY" in os.environ or is_key_set_in_config_file(key):
            if not click.confirm("OPENAI_API_KEY is already set. Do you want to overwrite it?"):
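The new Windows message asks for a Command Prompt restart because setx persists the variable for future processes only; the process that issued it does not see the value. A small sketch of that behaviour, with a hypothetical key:

```python
import os
import subprocess

# Windows only: setx writes OPENAI_API_KEY to the registry for *new* processes.
subprocess.call('setx OPENAI_API_KEY "sk-hypothetical"', shell=True)

# The current process environment is unchanged, hence the restart hint.
print(os.environ.get('OPENAI_API_KEY'))  # still None (or the old value) here
```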