diff --git a/README.md b/README.md
index e4c9e45..a235b95 100644
--- a/README.md
+++ b/README.md
@@ -136,6 +136,12 @@ gptdeploy create --description "Given a word, return a list of rhyming words usi
 ```
 Rhyme Generator
 
+### Word Cloud Generator
+```bash
+gptdeploy create --description "Generate a word cloud from a given text" --test "Lorem ipsum dolor sit amet, consectetur adipiscing elit."
+```
+Word Cloud Generator
+
 ### 3d model info
 ```bash
 gptdeploy create --description "Given a 3d object, return vertex count and face count" --test "https://raw.githubusercontent.com/polygonjs/polygonjs-assets/master/models/wolf.obj"
@@ -336,13 +342,6 @@ gptdeploy create --description "Generate QR code from URL" --test "https://www.e
 [//]: # (```)
 
 [//]: # ()
-[//]: # (### Word Cloud Generator)
-
-[//]: # (```bash)
-
-[//]: # (gptdeploy create --description "Generate a word cloud from a given text" --test "Lorem ipsum dolor sit amet, consectetur adipiscing elit.")
-
-[//]: # (```)
 
 [//]: # ()
 [//]: # (### Mandelbrot Set Visualizer)
@@ -426,3 +425,5 @@ Make sure it is only printed twice in case it changed.
 - [ ] use gptdeploy list to show all deployments
 - [ ] gptdeploy delete to delete a deployment
 - [ ] gptdeploy update to update a deployment
+- [ ] if the user runs gptdeploy without any arguments, show the help message
+- [ ] start streamlit playground automatically after the deployment
diff --git a/res/word_cloud_example.png b/res/word_cloud_example.png
new file mode 100644
index 0000000..4a29448
Binary files /dev/null and b/res/word_cloud_example.png differ
diff --git a/setup.py b/setup.py
index 3ee09a4..bd582fb 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ def read_requirements():
 
 setup(
     name='gptdeploy',
-    version='0.18.9',
+    version='0.18.10',
     description='Use natural language interface to create, deploy and update your microservice infrastructure.',
     long_description=open('README.md', 'r', encoding='utf-8').read(),
     long_description_content_type='text/markdown',
diff --git a/src/__init__.py b/src/__init__.py
index 0f5bf97..ea2001b 100644
--- a/src/__init__.py
+++ b/src/__init__.py
@@ -1,2 +1,2 @@
-__version__ = '0.18.9'
+__version__ = '0.18.10'
 from src.cli import main
\ No newline at end of file
diff --git a/src/executor_factory.py b/src/executor_factory.py
index 51d0c37..c7b0a41 100644
--- a/src/executor_factory.py
+++ b/src/executor_factory.py
@@ -1,17 +1,15 @@
+import os
 import random
+import re
 
 from src import gpt, jina_cloud
+from src.constants import FILE_AND_TAG_PAIRS
 from src.jina_cloud import push_executor, process_error_message
 from src.prompt_tasks import general_guidelines, executor_file_task, chain_of_thought_creation, test_executor_file_task, \
     chain_of_thought_optimization, requirements_file_task, docker_file_task, not_allowed
 from src.utils.io import recreate_folder, persist_file
 from src.utils.string_tools import print_colored
-import os
-import re
-
-from src.constants import FILE_AND_TAG_PAIRS
-
 
 
 class ExecutorFactory:
     def __init__(self):
@@ -72,7 +70,6 @@ class ExecutorFactory:
     ):
         EXECUTOR_FOLDER_v1 = self.get_executor_path(output_path, package, 1)
         recreate_folder(EXECUTOR_FOLDER_v1)
-        recreate_folder('../flow')
 
         print_colored('', '############# Executor #############', 'red')
         user_query = (
@@ -107,6 +104,7 @@ class ExecutorFactory:
         persist_file(test_executor_content, os.path.join(EXECUTOR_FOLDER_v1, 'test_executor.py'))
 
         print_colored('', '############# Requirements #############', 'red')
+        requirements_path = os.path.join(EXECUTOR_FOLDER_v1, 'requirements.txt')
         user_query = (
                 general_guidelines()
                 + self.wrap_content_in_code_block(executor_content, 'executor.py', 'python')
@@ -117,17 +115,17 @@
         requirements_content_raw = conversation.query(user_query)
         if is_chain_of_thought:
             requirements_content_raw = conversation.query(
-                chain_of_thought_optimization('', '../requirements.txt') + "Keep the same version of jina ")
+                chain_of_thought_optimization('', requirements_path) + "Keep the same version of jina ")
 
-        requirements_content = self.extract_content_from_result(requirements_content_raw, '../requirements.txt')
-        persist_file(requirements_content, os.path.join(EXECUTOR_FOLDER_v1, '../requirements.txt'))
+        requirements_content = self.extract_content_from_result(requirements_content_raw, 'requirements.txt')
+        persist_file(requirements_content, requirements_path)
 
         print_colored('', '############# Dockerfile #############', 'red')
         user_query = (
                 general_guidelines()
                 + self.wrap_content_in_code_block(executor_content, 'executor.py', 'python')
                 + self.wrap_content_in_code_block(test_executor_content, 'test_executor.py', 'python')
-                + self.wrap_content_in_code_block(requirements_content, '../requirements.txt', '')
+                + self.wrap_content_in_code_block(requirements_content, 'requirements.txt', '')
                 + docker_file_task()
         )
         conversation = self.gpt_session.get_conversation()
@@ -139,6 +137,7 @@
         persist_file(dockerfile_content, os.path.join(EXECUTOR_FOLDER_v1, 'Dockerfile'))
 
         self.write_config_yml(executor_name, EXECUTOR_FOLDER_v1)
+        print('First version of the executor created. Start iterating on it to make the tests pass...')
 
     def create_playground(self, executor_name, executor_path, host):
         print_colored('', '############# Playground #############', 'red')
@@ -176,6 +175,7 @@ print(response[0].text) # can also be blob in case of image/audio..., this shoul
         MAX_DEBUGGING_ITERATIONS = 10
         error_before = ''
         for i in range(1, MAX_DEBUGGING_ITERATIONS):
+            print('Debugging iteration', i)
             previous_executor_path = self.get_executor_path(output_path, package, i)
             next_executor_path = self.get_executor_path(output_path, package, i + 1)
             log_hubble = push_executor(previous_executor_path)
@@ -279,31 +279,26 @@ package2,package3,...
         packages = packages[:threads]
         return packages
 
-
     def create(self, description, num_approaches, output_path, test):
         generated_name = self.generate_executor_name(description)
         executor_name = f'{generated_name}{random.randint(0, 1000_000)}'
         packages_list = self.get_possible_packages(description, num_approaches)
         recreate_folder(output_path)
-        # packages_list = [['a']]
-        # executor_name = 'ColorPaletteGeneratorExecutor5946'
-        # executor_path = '/Users/florianhonicke/jina/gptdeploy/executor/colorsys_colorharmony/v5'
-        # host = 'grpcs://gptdeploy-5f6ea44fc8.wolf.jina.ai'
         for packages in packages_list:
             try:
                 self.create_executor(description, test, output_path, executor_name, packages)
                 executor_path = self.debug_executor(output_path, packages, description, test)
-                print('Deploy a jina flow')
                 host = jina_cloud.deploy_flow(executor_name, executor_path)
-                print(f'Flow is deployed create the playground for {host}')
                 self.create_playground(executor_name, executor_path, host)
             except self.MaxDebugTimeReachedException:
                 print('Could not debug the executor.')
                 continue
-            print(
-                'Executor name:', executor_name, '\n',
-                'Executor path:', executor_path, '\n',
-                'Host:', host, '\n',
-                'Playground:', f'streamlit run {os.path.join(executor_path, "app.py")}', '\n',
-            )
+            print(f'''
+Executor name: {executor_name}
+Executor path: {executor_path}
+Host: {host}
+
+Playground: streamlit run {os.path.join(executor_path, "app.py")}
+'''
+            )
             break
diff --git a/src/gpt.py b/src/gpt.py
index f5cf175..adc299c 100644
--- a/src/gpt.py
+++ b/src/gpt.py
@@ -54,9 +54,10 @@ class GPTSession:
         print('\n')
         money_prompt = round(self.chars_prompt_so_far / 3.4 * self.pricing_prompt / 1000, 2)
         money_generation = round(self.chars_generation_so_far / 3.4 * self.pricing_generation / 1000, 2)
-        print('money prompt:', f'${money_prompt}')
-        print('money generation:', f'${money_generation}')
-        print('total money:', f'${money_prompt + money_generation}')
+        print('Estimated costs on openai.com:')
+        # print('money prompt:', f'${money_prompt}')
+        # print('money generation:', f'${money_generation}')
+        print('total money so far:', f'${money_prompt + money_generation}')
         print('\n')
 
     def get_conversation(self):
diff --git a/src/jina_cloud.py b/src/jina_cloud.py
index ac43193..8d53d07 100644
--- a/src/jina_cloud.py
+++ b/src/jina_cloud.py
@@ -80,6 +80,7 @@ def deploy_on_jcloud(flow_yaml):
 
 
 def deploy_flow(executor_name, dest_folder):
+    print('Deploy a jina flow')
     flow = f'''
 jtype: Flow
 with:
@@ -106,12 +107,9 @@ executors:
     with open(full_flow_path, 'w') as f:
         f.write(flow)
 
-    # print('try local execution')
-    # flow = Flow.load_config(full_flow_path)
-    # with flow:
-    #     pass
-    print('deploy flow on jcloud')
-    return deploy_on_jcloud(flow_yaml=full_flow_path)
+    host = deploy_on_jcloud(flow_yaml=full_flow_path)
+    print(f'Flow is deployed. Creating the playground for {host}')
+    return host
 
 
 def replace_client_line(file_content: str, replacement: str) -> str:
diff --git a/src/key_handling.py b/src/key_handling.py
index 6e968ad..999432c 100644
--- a/src/key_handling.py
+++ b/src/key_handling.py
@@ -93,7 +93,7 @@ def set_api_key(key):
     if system_platform == "windows":
         set_env_variable_command = f'setx OPENAI_API_KEY "{key}"'
         subprocess.call(set_env_variable_command, shell=True)
-        click.echo("OPENAI_API_KEY has been set.")
+        click.echo("✅ Success, OPENAI_API_KEY has been set.\nPlease restart your Command Prompt to apply the changes.")
     elif system_platform in ["linux", "darwin"]:
         if "OPENAI_API_KEY" in os.environ or is_key_set_in_config_file(key):
             if not click.confirm("OPENAI_API_KEY is already set. Do you want to overwrite it?"):