mirror of
https://github.com/aljazceru/dev-gpt.git
synced 2025-12-20 15:14:20 +01:00
fix: package
This commit is contained in:
19
README.md
19
README.md
@@ -43,7 +43,7 @@ gptdeploy configure --key <your openai api key>
|
|||||||
```
|
```
|
||||||
If you set the environment variable `OPENAI_API_KEY`, the configuration step can be skipped.
|
If you set the environment variable `OPENAI_API_KEY`, the configuration step can be skipped.
|
||||||
|
|
||||||
### run
|
### Create Microservice
|
||||||
```bash
|
```bash
|
||||||
gptdeploy create --description "Given a PDF, return it's text" --test "https://www.africau.edu/images/default/sample.pdf"
|
gptdeploy create --description "Given a PDF, return it's text" --test "https://www.africau.edu/images/default/sample.pdf"
|
||||||
```
|
```
|
||||||
@@ -52,8 +52,18 @@ To create your personal microservice two things are required:
|
|||||||
- A `test` scenario that ensures the microservice works as expected.
|
- A `test` scenario that ensures the microservice works as expected.
|
||||||
|
|
||||||
The creation process should take between 5 and 15 minutes.
|
The creation process should take between 5 and 15 minutes.
|
||||||
During this time, GPT iteratively builds your microservice until it finds a strategy that make you test scenario pass.
|
During this time, GPT iteratively builds your microservice until it finds a strategy that make your test scenario pass.
|
||||||
Once the microservice is created and deployed, you can test it using the generated Streamlit playground.
|
Once the microservice is created and deployed, you can test it using the generated Streamlit playground.
|
||||||
|
The deployment is made on the Jina`s infrastructure.
|
||||||
|
When creating a Jina account, you get some free credits, which you can use to deploy your microservice ($0.025/hour).
|
||||||
|
If you run out of credits, you can purchase more.
|
||||||
|
|
||||||
|
### Delete Microservice
|
||||||
|
To save credits you can delete your microservice via the following commands:
|
||||||
|
```bash
|
||||||
|
jc list # get the microservice id
|
||||||
|
jc delete <microservice id>
|
||||||
|
```
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
The graphic below illustrates the process of creating a microservice and deploying it to the cloud.
|
The graphic below illustrates the process of creating a microservice and deploying it to the cloud.
|
||||||
@@ -368,7 +378,7 @@ If you want to contribute to this project, feel free to open a PR or an issue.
|
|||||||
In the following, you can find a list of things that need to be done.
|
In the following, you can find a list of things that need to be done.
|
||||||
|
|
||||||
Critical:
|
Critical:
|
||||||
- [ ] fix problem with package installation
|
- [x] fix problem with key setup
|
||||||
- [ ] add instruction about cleanup of deployments
|
- [ ] add instruction about cleanup of deployments
|
||||||
|
|
||||||
Nice to have:
|
Nice to have:
|
||||||
@@ -391,3 +401,6 @@ Make sure it is only printed twice in case it changed.
|
|||||||
- [ ] feat: make playground more stylish by adding attributes like: clean design, beautiful, like it was made by a professional designer, ...
|
- [ ] feat: make playground more stylish by adding attributes like: clean design, beautiful, like it was made by a professional designer, ...
|
||||||
- [ ] support for other large language models like ChatGLM
|
- [ ] support for other large language models like ChatGLM
|
||||||
- [ ] for cost savings, it should be possible to insert less context during the code generation of the main functionality - no jina knowledge is required
|
- [ ] for cost savings, it should be possible to insert less context during the code generation of the main functionality - no jina knowledge is required
|
||||||
|
- [ ] use gptdeploy list to show all deployments
|
||||||
|
- [ ] gptdeploy delete to delete a deployment
|
||||||
|
- [ ] gptdeploy update to update a deployment
|
||||||
|
|||||||
2
setup.py
2
setup.py
@@ -7,7 +7,7 @@ def read_requirements():
|
|||||||
|
|
||||||
setup(
|
setup(
|
||||||
name='gptdeploy',
|
name='gptdeploy',
|
||||||
version='0.18.4',
|
version='0.18.5',
|
||||||
description='Use natural language interface to create, deploy and update your microservice infrastructure.',
|
description='Use natural language interface to create, deploy and update your microservice infrastructure.',
|
||||||
long_description=open('README.md', 'r', encoding='utf-8').read(),
|
long_description=open('README.md', 'r', encoding='utf-8').read(),
|
||||||
long_description_content_type='text/markdown',
|
long_description_content_type='text/markdown',
|
||||||
|
|||||||
@@ -1,2 +1,2 @@
|
|||||||
__version__ = '0.18.4'
|
__version__ = '0.18.5'
|
||||||
from src.main import main
|
from src.cli import main
|
||||||
36
src/cli.py
Normal file
36
src/cli.py
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
import click
|
||||||
|
|
||||||
|
from src.executor_factory import ExecutorFactory
|
||||||
|
from src.jina_cloud import jina_auth_login
|
||||||
|
from src.key_handling import set_api_key
|
||||||
|
|
||||||
|
|
||||||
|
@click.group(invoke_without_command=True)
|
||||||
|
def main():
|
||||||
|
jina_auth_login()
|
||||||
|
|
||||||
|
|
||||||
|
@main.command()
|
||||||
|
@click.option('--description', required=True, help='Description of the executor.')
|
||||||
|
@click.option('--test', required=True, help='Test scenario for the executor.')
|
||||||
|
@click.option('--num_approaches', default=3, type=int,
|
||||||
|
help='Number of num_approaches to use to fulfill the task (default: 3).')
|
||||||
|
@click.option('--output_path', default='executor', help='Path to the output folder (must be empty). ')
|
||||||
|
def create(
|
||||||
|
description,
|
||||||
|
test,
|
||||||
|
num_approaches=3,
|
||||||
|
output_path='executor',
|
||||||
|
):
|
||||||
|
executor_factory = ExecutorFactory()
|
||||||
|
executor_factory.create(description, num_approaches, output_path, test)
|
||||||
|
|
||||||
|
|
||||||
|
@main.command()
|
||||||
|
@click.option('--key', required=True, help='Your OpenAI API key.')
|
||||||
|
def configure(key):
|
||||||
|
set_api_key(key)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
309
src/executor_factory.py
Normal file
309
src/executor_factory.py
Normal file
@@ -0,0 +1,309 @@
|
|||||||
|
import random
|
||||||
|
|
||||||
|
from src import gpt, jina_cloud
|
||||||
|
from src.jina_cloud import push_executor, process_error_message
|
||||||
|
from src.prompt_tasks import general_guidelines, executor_file_task, chain_of_thought_creation, test_executor_file_task, \
|
||||||
|
chain_of_thought_optimization, requirements_file_task, docker_file_task, not_allowed
|
||||||
|
from src.utils.io import recreate_folder, persist_file
|
||||||
|
from src.utils.string_tools import print_colored
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
|
||||||
|
from src.constants import FILE_AND_TAG_PAIRS
|
||||||
|
|
||||||
|
|
||||||
|
class ExecutorFactory:
|
||||||
|
def __init__(self):
|
||||||
|
self.gpt_session = gpt.GPTSession()
|
||||||
|
|
||||||
|
def extract_content_from_result(self, plain_text, file_name):
|
||||||
|
pattern = fr"^\*\*{file_name}\*\*\n```(?:\w+\n)?([\s\S]*?)```"
|
||||||
|
match = re.search(pattern, plain_text, re.MULTILINE)
|
||||||
|
if match:
|
||||||
|
return match.group(1).strip()
|
||||||
|
else:
|
||||||
|
return ''
|
||||||
|
|
||||||
|
def write_config_yml(self, executor_name, dest_folder):
|
||||||
|
config_content = f'''
|
||||||
|
jtype: {executor_name}
|
||||||
|
py_modules:
|
||||||
|
- executor.py
|
||||||
|
metas:
|
||||||
|
name: {executor_name}
|
||||||
|
'''
|
||||||
|
with open(os.path.join(dest_folder, 'config.yml'), 'w') as f:
|
||||||
|
f.write(config_content)
|
||||||
|
|
||||||
|
def get_all_executor_files_with_content(self, folder_path):
|
||||||
|
file_name_to_content = {}
|
||||||
|
for filename in os.listdir(folder_path):
|
||||||
|
file_path = os.path.join(folder_path, filename)
|
||||||
|
|
||||||
|
if os.path.isfile(file_path):
|
||||||
|
with open(file_path, 'r', encoding='utf-8') as file:
|
||||||
|
content = file.read()
|
||||||
|
file_name_to_content[filename] = content
|
||||||
|
|
||||||
|
return file_name_to_content
|
||||||
|
|
||||||
|
def files_to_string(self, file_name_to_content):
|
||||||
|
all_executor_files_string = ''
|
||||||
|
for file_name, tag in FILE_AND_TAG_PAIRS:
|
||||||
|
if file_name in file_name_to_content:
|
||||||
|
all_executor_files_string += f'**{file_name}**\n'
|
||||||
|
all_executor_files_string += f'```{tag}\n'
|
||||||
|
all_executor_files_string += file_name_to_content[file_name]
|
||||||
|
all_executor_files_string += '\n```\n\n'
|
||||||
|
return all_executor_files_string
|
||||||
|
|
||||||
|
def wrap_content_in_code_block(self, executor_content, file_name, tag):
|
||||||
|
return f'**{file_name}**\n```{tag}\n{executor_content}\n```\n\n'
|
||||||
|
|
||||||
|
def create_executor(
|
||||||
|
self,
|
||||||
|
description,
|
||||||
|
test,
|
||||||
|
output_path,
|
||||||
|
executor_name,
|
||||||
|
package,
|
||||||
|
is_chain_of_thought=False,
|
||||||
|
):
|
||||||
|
EXECUTOR_FOLDER_v1 = self.get_executor_path(output_path, package, 1)
|
||||||
|
recreate_folder(EXECUTOR_FOLDER_v1)
|
||||||
|
recreate_folder('../flow')
|
||||||
|
|
||||||
|
print_colored('', '############# Executor #############', 'red')
|
||||||
|
user_query = (
|
||||||
|
general_guidelines()
|
||||||
|
+ executor_file_task(executor_name, description, test, package)
|
||||||
|
+ chain_of_thought_creation()
|
||||||
|
)
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
executor_content_raw = conversation.query(user_query)
|
||||||
|
if is_chain_of_thought:
|
||||||
|
executor_content_raw = conversation.query(
|
||||||
|
f"General rules: " + not_allowed() + chain_of_thought_optimization('python', 'executor.py'))
|
||||||
|
executor_content = self.extract_content_from_result(executor_content_raw, 'executor.py')
|
||||||
|
|
||||||
|
persist_file(executor_content, os.path.join(EXECUTOR_FOLDER_v1, 'executor.py'))
|
||||||
|
|
||||||
|
print_colored('', '############# Test Executor #############', 'red')
|
||||||
|
user_query = (
|
||||||
|
general_guidelines()
|
||||||
|
+ self.wrap_content_in_code_block(executor_content, 'executor.py', 'python')
|
||||||
|
+ test_executor_file_task(executor_name, test)
|
||||||
|
)
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
test_executor_content_raw = conversation.query(user_query)
|
||||||
|
if is_chain_of_thought:
|
||||||
|
test_executor_content_raw = conversation.query(
|
||||||
|
f"General rules: " + not_allowed() +
|
||||||
|
chain_of_thought_optimization('python', 'test_executor.py')
|
||||||
|
+ "Don't add any additional tests. "
|
||||||
|
)
|
||||||
|
test_executor_content = self.extract_content_from_result(test_executor_content_raw, 'test_executor.py')
|
||||||
|
persist_file(test_executor_content, os.path.join(EXECUTOR_FOLDER_v1, 'test_executor.py'))
|
||||||
|
|
||||||
|
print_colored('', '############# Requirements #############', 'red')
|
||||||
|
user_query = (
|
||||||
|
general_guidelines()
|
||||||
|
+ self.wrap_content_in_code_block(executor_content, 'executor.py', 'python')
|
||||||
|
+ self.wrap_content_in_code_block(test_executor_content, 'test_executor.py', 'python')
|
||||||
|
+ requirements_file_task()
|
||||||
|
)
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
requirements_content_raw = conversation.query(user_query)
|
||||||
|
if is_chain_of_thought:
|
||||||
|
requirements_content_raw = conversation.query(
|
||||||
|
chain_of_thought_optimization('', '../requirements.txt') + "Keep the same version of jina ")
|
||||||
|
|
||||||
|
requirements_content = self.extract_content_from_result(requirements_content_raw, '../requirements.txt')
|
||||||
|
persist_file(requirements_content, os.path.join(EXECUTOR_FOLDER_v1, '../requirements.txt'))
|
||||||
|
|
||||||
|
print_colored('', '############# Dockerfile #############', 'red')
|
||||||
|
user_query = (
|
||||||
|
general_guidelines()
|
||||||
|
+ self.wrap_content_in_code_block(executor_content, 'executor.py', 'python')
|
||||||
|
+ self.wrap_content_in_code_block(test_executor_content, 'test_executor.py', 'python')
|
||||||
|
+ self.wrap_content_in_code_block(requirements_content, '../requirements.txt', '')
|
||||||
|
+ docker_file_task()
|
||||||
|
)
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
dockerfile_content_raw = conversation.query(user_query)
|
||||||
|
if is_chain_of_thought:
|
||||||
|
dockerfile_content_raw = conversation.query(
|
||||||
|
f"General rules: " + not_allowed() + chain_of_thought_optimization('dockerfile', 'Dockerfile'))
|
||||||
|
dockerfile_content = self.extract_content_from_result(dockerfile_content_raw, 'Dockerfile')
|
||||||
|
persist_file(dockerfile_content, os.path.join(EXECUTOR_FOLDER_v1, 'Dockerfile'))
|
||||||
|
|
||||||
|
self.write_config_yml(executor_name, EXECUTOR_FOLDER_v1)
|
||||||
|
|
||||||
|
def create_playground(self, executor_name, executor_path, host):
|
||||||
|
print_colored('', '############# Playground #############', 'red')
|
||||||
|
|
||||||
|
file_name_to_content = self.get_all_executor_files_with_content(executor_path)
|
||||||
|
user_query = (
|
||||||
|
general_guidelines()
|
||||||
|
+ self.wrap_content_in_code_block(file_name_to_content['executor.py'], 'executor.py', 'python')
|
||||||
|
+ self.wrap_content_in_code_block(file_name_to_content['test_executor.py'], 'test_executor.py',
|
||||||
|
'python')
|
||||||
|
+ f'''
|
||||||
|
Create a playground for the executor {executor_name} using streamlit.
|
||||||
|
The playground must look like it was made by a professional designer.
|
||||||
|
All the ui elements are well thought out to make them visually appealing and easy to use.
|
||||||
|
The executor is hosted on {host}.
|
||||||
|
This is an example how you can connect to the executor assuming the document (d) is already defined:
|
||||||
|
from jina import Client, Document, DocumentArray
|
||||||
|
client = Client(host='{host}')
|
||||||
|
response = client.post('/', inputs=DocumentArray([d])) # always use '/'
|
||||||
|
print(response[0].text) # can also be blob in case of image/audio..., this should be visualized in the streamlit app
|
||||||
|
'''
|
||||||
|
)
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
conversation.query(user_query)
|
||||||
|
playground_content_raw = conversation.query(
|
||||||
|
f"General rules: " + not_allowed() + chain_of_thought_optimization('python', 'app.py'))
|
||||||
|
playground_content = self.extract_content_from_result(playground_content_raw, 'app.py')
|
||||||
|
persist_file(playground_content, os.path.join(executor_path, 'app.py'))
|
||||||
|
|
||||||
|
def get_executor_path(self, output_path, package, version):
|
||||||
|
package_path = '_'.join(package)
|
||||||
|
return os.path.join(output_path, package_path, f'v{version}')
|
||||||
|
|
||||||
|
def debug_executor(self, output_path, package, description, test):
|
||||||
|
MAX_DEBUGGING_ITERATIONS = 10
|
||||||
|
error_before = ''
|
||||||
|
for i in range(1, MAX_DEBUGGING_ITERATIONS):
|
||||||
|
previous_executor_path = self.get_executor_path(output_path, package, i)
|
||||||
|
next_executor_path = self.get_executor_path(output_path, package, i + 1)
|
||||||
|
log_hubble = push_executor(previous_executor_path)
|
||||||
|
error = process_error_message(log_hubble)
|
||||||
|
if error:
|
||||||
|
recreate_folder(next_executor_path)
|
||||||
|
file_name_to_content = self.get_all_executor_files_with_content(previous_executor_path)
|
||||||
|
all_files_string = self.files_to_string(file_name_to_content)
|
||||||
|
user_query = (
|
||||||
|
f"General rules: " + not_allowed()
|
||||||
|
+ 'Here is the description of the task the executor must solve:\n'
|
||||||
|
+ description
|
||||||
|
+ '\n\nHere is the test scenario the executor must pass:\n'
|
||||||
|
+ test
|
||||||
|
+ 'Here are all the files I use:\n'
|
||||||
|
+ all_files_string
|
||||||
|
+ (('This is an error that is already fixed before:\n'
|
||||||
|
+ error_before) if error_before else '')
|
||||||
|
+ '\n\nNow, I get the following error:\n'
|
||||||
|
+ error + '\n'
|
||||||
|
+ 'Think quickly about possible reasons. '
|
||||||
|
'Then output the files that need change. '
|
||||||
|
"Don't output files that don't need change. "
|
||||||
|
"If you output a file, then write the complete file. "
|
||||||
|
"Use the exact same syntax to wrap the code:\n"
|
||||||
|
f"**...**\n"
|
||||||
|
f"```...\n"
|
||||||
|
f"...code...\n"
|
||||||
|
f"```\n\n"
|
||||||
|
)
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
returned_files_raw = conversation.query(user_query)
|
||||||
|
for file_name, tag in FILE_AND_TAG_PAIRS:
|
||||||
|
updated_file = self.extract_content_from_result(returned_files_raw, file_name)
|
||||||
|
if updated_file:
|
||||||
|
file_name_to_content[file_name] = updated_file
|
||||||
|
|
||||||
|
for file_name, content in file_name_to_content.items():
|
||||||
|
persist_file(content, os.path.join(next_executor_path, file_name))
|
||||||
|
error_before = error
|
||||||
|
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
if i == MAX_DEBUGGING_ITERATIONS - 1:
|
||||||
|
raise self.MaxDebugTimeReachedException('Could not debug the executor.')
|
||||||
|
return self.get_executor_path(output_path, package, i)
|
||||||
|
|
||||||
|
class MaxDebugTimeReachedException(BaseException):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def generate_executor_name(self, description):
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
user_query = f'''
|
||||||
|
Generate a name for the executor matching the description:
|
||||||
|
"{description}"
|
||||||
|
The executor name must fulfill the following criteria:
|
||||||
|
- camel case
|
||||||
|
- start with a capital letter
|
||||||
|
- only consists of lower and upper case characters
|
||||||
|
- end with Executor.
|
||||||
|
|
||||||
|
The output is a the raw string wrapped into ``` and starting with **name.txt** like this:
|
||||||
|
**name.txt**
|
||||||
|
```
|
||||||
|
PDFParserExecutor
|
||||||
|
```
|
||||||
|
'''
|
||||||
|
name_raw = conversation.query(user_query)
|
||||||
|
name = self.extract_content_from_result(name_raw, 'name.txt')
|
||||||
|
return name
|
||||||
|
|
||||||
|
def get_possible_packages(self, description, threads):
|
||||||
|
print_colored('', '############# What package to use? #############', 'red')
|
||||||
|
user_query = f'''
|
||||||
|
Here is the task description of the problme you need to solve:
|
||||||
|
"{description}"
|
||||||
|
First, write down all the subtasks you need to solve which require python packages.
|
||||||
|
For each subtask:
|
||||||
|
Provide a list of 1 to 3 python packages you could use to solve the subtask. Prefer modern packages.
|
||||||
|
For each package:
|
||||||
|
Write down some non-obvious thoughts about the challenges you might face for the task and give multiple approaches on how you handle them.
|
||||||
|
For example, there might be some packages you must not use because they do not obay the rules:
|
||||||
|
{not_allowed()}
|
||||||
|
Discuss the pros and cons for all of these packages.
|
||||||
|
Create a list of package subsets that you could use to solve the task.
|
||||||
|
The list is sorted in a way that the most promising subset of packages is at the top.
|
||||||
|
The maximum length of the list is 5.
|
||||||
|
|
||||||
|
The output must be a list of lists wrapped into ``` and starting with **packages.csv** like this:
|
||||||
|
**packages.csv**
|
||||||
|
```
|
||||||
|
package1,package2
|
||||||
|
package2,package3,...
|
||||||
|
...
|
||||||
|
```
|
||||||
|
'''
|
||||||
|
conversation = self.gpt_session.get_conversation()
|
||||||
|
packages_raw = conversation.query(user_query)
|
||||||
|
packages_csv_string = self.extract_content_from_result(packages_raw, 'packages.csv')
|
||||||
|
packages = [package.split(',') for package in packages_csv_string.split('\n')]
|
||||||
|
packages = packages[:threads]
|
||||||
|
return packages
|
||||||
|
|
||||||
|
|
||||||
|
def create(self, description, num_approaches, output_path, test):
|
||||||
|
generated_name = self.generate_executor_name(description)
|
||||||
|
executor_name = f'{generated_name}{random.randint(0, 1000_000)}'
|
||||||
|
packages_list = self.get_possible_packages(description, num_approaches)
|
||||||
|
recreate_folder(output_path)
|
||||||
|
# packages_list = [['a']]
|
||||||
|
# executor_name = 'ColorPaletteGeneratorExecutor5946'
|
||||||
|
# executor_path = '/Users/florianhonicke/jina/gptdeploy/executor/colorsys_colorharmony/v5'
|
||||||
|
# host = 'grpcs://gptdeploy-5f6ea44fc8.wolf.jina.ai'
|
||||||
|
for packages in packages_list:
|
||||||
|
try:
|
||||||
|
self.create_executor(description, test, output_path, executor_name, packages)
|
||||||
|
executor_path = self.debug_executor(output_path, packages, description, test)
|
||||||
|
print('Deploy a jina flow')
|
||||||
|
host = jina_cloud.deploy_flow(executor_name, executor_path)
|
||||||
|
print(f'Flow is deployed create the playground for {host}')
|
||||||
|
self.create_playground(executor_name, executor_path, host)
|
||||||
|
except self.MaxDebugTimeReachedException:
|
||||||
|
print('Could not debug the executor.')
|
||||||
|
continue
|
||||||
|
print(
|
||||||
|
'Executor name:', executor_name, '\n',
|
||||||
|
'Executor path:', executor_path, '\n',
|
||||||
|
'Host:', host, '\n',
|
||||||
|
'Playground:', f'streamlit run {os.path.join(executor_path, "app.py")}', '\n',
|
||||||
|
)
|
||||||
|
break
|
||||||
@@ -1,6 +1,8 @@
|
|||||||
import os
|
import os
|
||||||
import platform
|
import platform
|
||||||
|
import re
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
import click
|
import click
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@@ -13,35 +15,76 @@ def get_shell():
|
|||||||
if psutil is None:
|
if psutil is None:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
shell_names = ["bash", "zsh", "sh", "fish", "csh", "tcsh", "ksh", "dash"]
|
||||||
|
|
||||||
|
# Check the SHELL environment variable first
|
||||||
|
shell_env = os.environ.get('SHELL')
|
||||||
|
if shell_env:
|
||||||
|
shell_name = os.path.basename(shell_env)
|
||||||
|
if shell_name in shell_names:
|
||||||
|
return shell_name
|
||||||
|
|
||||||
|
# Fallback to traversing the process tree
|
||||||
try:
|
try:
|
||||||
p = psutil.Process(os.getpid())
|
p = psutil.Process(os.getpid())
|
||||||
while p.parent() and p.parent().name() != "init":
|
|
||||||
|
# Traverse the process tree
|
||||||
|
while p.parent():
|
||||||
p = p.parent()
|
p = p.parent()
|
||||||
|
if p.name() in shell_names:
|
||||||
return p.name()
|
return p.name()
|
||||||
|
|
||||||
|
return None
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
click.echo(f"Error detecting shell: {e}")
|
click.echo(f"Error detecting shell: {e}")
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def set_env_variable(shell, key):
|
def get_shell_config(key):
|
||||||
shell_config = {
|
return {
|
||||||
"bash": {"config_file": "~/.bashrc", "export_line": f"export OPENAI_API_KEY={key}"},
|
"bash": {"config_file": "~/.bashrc", "export_line": f"export OPENAI_API_KEY={key}"},
|
||||||
"zsh": {"config_file": "~/.zshrc", "export_line": f"export OPENAI_API_KEY={key}"},
|
"zsh": {"config_file": "~/.zshrc", "export_line": f"export OPENAI_API_KEY={key}"},
|
||||||
|
"sh": {"config_file": "~/.profile", "export_line": f"export OPENAI_API_KEY={key}"},
|
||||||
"fish": {
|
"fish": {
|
||||||
"config_file": "~/.config/fish/config.fish",
|
"config_file": "~/.config/fish/config.fish",
|
||||||
"export_line": f"set -gx OPENAI_API_KEY {key}",
|
"export_line": f"set -gx OPENAI_API_KEY {key}",
|
||||||
},
|
},
|
||||||
|
"csh": {"config_file": "~/.cshrc", "export_line": f"setenv OPENAI_API_KEY {key}"},
|
||||||
|
"tcsh": {"config_file": "~/.tcshrc", "export_line": f"setenv OPENAI_API_KEY {key}"},
|
||||||
|
"ksh": {"config_file": "~/.kshrc", "export_line": f"export OPENAI_API_KEY={key}"},
|
||||||
|
"dash": {"config_file": "~/.profile", "export_line": f"export OPENAI_API_KEY={key}"}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def set_env_variable(shell, key):
|
||||||
|
shell_config = get_shell_config(key)
|
||||||
if shell not in shell_config:
|
if shell not in shell_config:
|
||||||
click.echo("Sorry, your shell is not supported.")
|
click.echo("Sorry, your shell is not supported. Please add the key OPENAI_API_KEY manually.")
|
||||||
return
|
return
|
||||||
|
|
||||||
config_file = os.path.expanduser(shell_config[shell]["config_file"])
|
config_file = os.path.expanduser(shell_config[shell]["config_file"])
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(config_file, "r") as file:
|
||||||
|
content = file.read()
|
||||||
|
|
||||||
|
export_line = shell_config[shell]['export_line']
|
||||||
|
|
||||||
|
# Update the existing API key if it exists, otherwise append it to the config file
|
||||||
|
if f"OPENAI_API_KEY" in content:
|
||||||
|
content = re.sub(r'OPENAI_API_KEY=.*', f'OPENAI_API_KEY={key}', content, flags=re.MULTILINE)
|
||||||
|
|
||||||
|
with open(config_file, "w") as file:
|
||||||
|
file.write(content)
|
||||||
|
else:
|
||||||
with open(config_file, "a") as file:
|
with open(config_file, "a") as file:
|
||||||
file.write(f"\n{shell_config[shell]['export_line']}\n")
|
file.write(f"\n{export_line}\n")
|
||||||
click.echo(f"OPENAI_API_KEY has been set in {config_file}.")
|
|
||||||
|
click.echo(
|
||||||
|
f"✅ Success, OPENAI_API_KEY has been set in {config_file}\nPlease restart your shell to apply the changes.")
|
||||||
|
|
||||||
|
except FileNotFoundError:
|
||||||
|
click.echo(f"Error: {config_file} not found. Please set the environment variable manually.")
|
||||||
|
|
||||||
|
|
||||||
def set_api_key(key):
|
def set_api_key(key):
|
||||||
@@ -52,14 +95,15 @@ def set_api_key(key):
|
|||||||
subprocess.call(set_env_variable_command, shell=True)
|
subprocess.call(set_env_variable_command, shell=True)
|
||||||
click.echo("OPENAI_API_KEY has been set.")
|
click.echo("OPENAI_API_KEY has been set.")
|
||||||
elif system_platform in ["linux", "darwin"]:
|
elif system_platform in ["linux", "darwin"]:
|
||||||
if "OPENAI_API_KEY" in os.environ:
|
if "OPENAI_API_KEY" in os.environ or is_key_set_in_config_file(key):
|
||||||
if not click.confirm("OPENAI_API_KEY is already set. Do you want to overwrite it?"):
|
if not click.confirm("OPENAI_API_KEY is already set. Do you want to overwrite it?"):
|
||||||
click.echo("Aborted.")
|
click.echo("Aborted.")
|
||||||
return
|
return
|
||||||
|
|
||||||
shell = get_shell()
|
shell = get_shell()
|
||||||
if shell is None:
|
if shell is None:
|
||||||
click.echo("Error: Unable to detect your shell or psutil is not available. Please set the environment variable manually.")
|
click.echo(
|
||||||
|
"Error: Unable to detect your shell or psutil is not available. Please set the environment variable manually.")
|
||||||
return
|
return
|
||||||
|
|
||||||
set_env_variable(shell, key)
|
set_env_variable(shell, key)
|
||||||
@@ -67,3 +111,21 @@ def set_api_key(key):
|
|||||||
click.echo("Sorry, this platform is not supported.")
|
click.echo("Sorry, this platform is not supported.")
|
||||||
|
|
||||||
|
|
||||||
|
def is_key_set_in_config_file(key):
|
||||||
|
shell = get_shell()
|
||||||
|
if shell is None:
|
||||||
|
return False
|
||||||
|
|
||||||
|
shell_config = get_shell_config(key)
|
||||||
|
|
||||||
|
config_file = os.path.expanduser(shell_config[shell]["config_file"])
|
||||||
|
|
||||||
|
try:
|
||||||
|
with open(config_file, "r") as file:
|
||||||
|
content = file.read()
|
||||||
|
if f"OPENAI_API_KEY" in content:
|
||||||
|
return True
|
||||||
|
except FileNotFoundError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
return False
|
||||||
|
|||||||
337
src/main.py
337
src/main.py
@@ -1,337 +0,0 @@
|
|||||||
import random
|
|
||||||
|
|
||||||
import click
|
|
||||||
|
|
||||||
from src import gpt, jina_cloud
|
|
||||||
from src.jina_cloud import push_executor, process_error_message, jina_auth_login
|
|
||||||
from src.key_handling import set_api_key
|
|
||||||
from src.prompt_tasks import general_guidelines, executor_file_task, chain_of_thought_creation, test_executor_file_task, \
|
|
||||||
chain_of_thought_optimization, requirements_file_task, docker_file_task, not_allowed
|
|
||||||
from src.utils.io import recreate_folder, persist_file
|
|
||||||
from src.utils.string_tools import print_colored
|
|
||||||
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
|
|
||||||
from src.constants import FILE_AND_TAG_PAIRS
|
|
||||||
|
|
||||||
gpt_session = gpt.GPTSession()
|
|
||||||
|
|
||||||
def extract_content_from_result(plain_text, file_name):
|
|
||||||
pattern = fr"^\*\*{file_name}\*\*\n```(?:\w+\n)?([\s\S]*?)```"
|
|
||||||
match = re.search(pattern, plain_text, re.MULTILINE)
|
|
||||||
if match:
|
|
||||||
return match.group(1).strip()
|
|
||||||
else:
|
|
||||||
return ''
|
|
||||||
|
|
||||||
def write_config_yml(executor_name, dest_folder):
|
|
||||||
config_content = f'''
|
|
||||||
jtype: {executor_name}
|
|
||||||
py_modules:
|
|
||||||
- executor.py
|
|
||||||
metas:
|
|
||||||
name: {executor_name}
|
|
||||||
'''
|
|
||||||
with open(os.path.join(dest_folder, 'config.yml'), 'w') as f:
|
|
||||||
f.write(config_content)
|
|
||||||
|
|
||||||
def get_all_executor_files_with_content(folder_path):
|
|
||||||
file_name_to_content = {}
|
|
||||||
for filename in os.listdir(folder_path):
|
|
||||||
file_path = os.path.join(folder_path, filename)
|
|
||||||
|
|
||||||
if os.path.isfile(file_path):
|
|
||||||
with open(file_path, 'r', encoding='utf-8') as file:
|
|
||||||
content = file.read()
|
|
||||||
file_name_to_content[filename] = content
|
|
||||||
|
|
||||||
return file_name_to_content
|
|
||||||
|
|
||||||
def files_to_string(file_name_to_content):
|
|
||||||
all_executor_files_string = ''
|
|
||||||
for file_name, tag in FILE_AND_TAG_PAIRS:
|
|
||||||
if file_name in file_name_to_content:
|
|
||||||
all_executor_files_string += f'**{file_name}**\n'
|
|
||||||
all_executor_files_string += f'```{tag}\n'
|
|
||||||
all_executor_files_string += file_name_to_content[file_name]
|
|
||||||
all_executor_files_string += '\n```\n\n'
|
|
||||||
return all_executor_files_string
|
|
||||||
|
|
||||||
|
|
||||||
def wrap_content_in_code_block(executor_content, file_name, tag):
|
|
||||||
return f'**{file_name}**\n```{tag}\n{executor_content}\n```\n\n'
|
|
||||||
|
|
||||||
|
|
||||||
def create_executor(
        description,
        test,
        output_path,
        executor_name,
        package,
        is_chain_of_thought=False,
):
    """Generate all v1 executor files (code, test, requirements, Dockerfile, config) via GPT.

    Args:
        description: Natural-language description of the microservice task.
        test: Test scenario the executor must pass.
        output_path: Root folder under which versioned executor folders live.
        executor_name: Name of the Jina executor class to generate.
        package: Iterable of python package names this attempt builds on.
        is_chain_of_thought: When True, each file gets a second GPT pass that
            refines the first answer before it is parsed.
    """
    EXECUTOR_FOLDER_v1 = get_executor_path(output_path, package, 1)
    recreate_folder(EXECUTOR_FOLDER_v1)
    # NOTE(review): relative path — this resolves against the current working
    # directory, not output_path; confirm '../flow' is intended.
    recreate_folder('../flow')

    # --- Stage 1: executor.py -------------------------------------------
    print_colored('', '############# Executor #############', 'red')
    user_query = (
            general_guidelines()
            + executor_file_task(executor_name, description, test, package)
            + chain_of_thought_creation()
    )
    conversation = gpt_session.get_conversation()
    executor_content_raw = conversation.query(user_query)
    if is_chain_of_thought:
        # Optional refinement pass on the same conversation.
        executor_content_raw = conversation.query(
            f"General rules: " + not_allowed() + chain_of_thought_optimization('python', 'executor.py'))
    executor_content = extract_content_from_result(executor_content_raw, 'executor.py')

    persist_file(executor_content, os.path.join(EXECUTOR_FOLDER_v1, 'executor.py'))

    # --- Stage 2: test_executor.py --------------------------------------
    print_colored('', '############# Test Executor #############', 'red')
    user_query = (
            general_guidelines()
            + wrap_content_in_code_block(executor_content, 'executor.py', 'python')
            + test_executor_file_task(executor_name, test)
    )
    conversation = gpt_session.get_conversation()
    test_executor_content_raw = conversation.query(user_query)
    if is_chain_of_thought:
        test_executor_content_raw = conversation.query(
            f"General rules: " + not_allowed() +
            chain_of_thought_optimization('python', 'test_executor.py')
            + "Don't add any additional tests. "
        )
    test_executor_content = extract_content_from_result(test_executor_content_raw, 'test_executor.py')
    persist_file(test_executor_content, os.path.join(EXECUTOR_FOLDER_v1, 'test_executor.py'))

    # --- Stage 3: requirements.txt --------------------------------------
    print_colored('', '############# Requirements #############', 'red')
    user_query = (
            general_guidelines()
            + wrap_content_in_code_block(executor_content, 'executor.py', 'python')
            + wrap_content_in_code_block(test_executor_content, 'test_executor.py', 'python')
            + requirements_file_task()
    )
    conversation = gpt_session.get_conversation()
    requirements_content_raw = conversation.query(user_query)
    if is_chain_of_thought:
        requirements_content_raw = conversation.query(
            chain_of_thought_optimization('', '../requirements.txt') + "Keep the same version of jina ")

    # NOTE(review): the marker literally contains '../', so GPT must emit
    # '**../requirements.txt**', and the file is persisted into the PARENT of
    # the v1 folder — confirm both are intended.
    requirements_content = extract_content_from_result(requirements_content_raw, '../requirements.txt')
    persist_file(requirements_content, os.path.join(EXECUTOR_FOLDER_v1, '../requirements.txt'))

    # --- Stage 4: Dockerfile --------------------------------------------
    print_colored('', '############# Dockerfile #############', 'red')
    user_query = (
            general_guidelines()
            + wrap_content_in_code_block(executor_content, 'executor.py', 'python')
            + wrap_content_in_code_block(test_executor_content, 'test_executor.py', 'python')
            + wrap_content_in_code_block(requirements_content, '../requirements.txt', '')
            + docker_file_task()
    )
    conversation = gpt_session.get_conversation()
    dockerfile_content_raw = conversation.query(user_query)
    if is_chain_of_thought:
        dockerfile_content_raw = conversation.query(
            f"General rules: " + not_allowed() + chain_of_thought_optimization('dockerfile', 'Dockerfile'))
    dockerfile_content = extract_content_from_result(dockerfile_content_raw, 'Dockerfile')
    persist_file(dockerfile_content, os.path.join(EXECUTOR_FOLDER_v1, 'Dockerfile'))

    # --- Stage 5: config.yml (deterministic, no GPT involved) -----------
    write_config_yml(executor_name, EXECUTOR_FOLDER_v1)
|
|
||||||
|
|
||||||
|
|
||||||
def create_playground(executor_name, executor_path, host):
    """Generate a streamlit playground (app.py) for the deployed executor.

    Args:
        executor_name: Name of the deployed executor class.
        executor_path: Folder containing the generated executor files;
            app.py is written there.
        host: Public address of the deployed Jina flow.
    """
    print_colored('', '############# Playground #############', 'red')

    file_name_to_content = get_all_executor_files_with_content(executor_path)
    user_query = (
            general_guidelines()
            + wrap_content_in_code_block(file_name_to_content['executor.py'], 'executor.py', 'python')
            + wrap_content_in_code_block(file_name_to_content['test_executor.py'], 'test_executor.py', 'python')
            + f'''
Create a playground for the executor {executor_name} using streamlit.
The playground must look like it was made by a professional designer.
All the ui elements are well thought out to make them visually appealing and easy to use.
The executor is hosted on {host}.
This is an example how you can connect to the executor assuming the document (d) is already defined:
from jina import Client, Document, DocumentArray
client = Client(host='{host}')
response = client.post('/', inputs=DocumentArray([d])) # always use '/'
print(response[0].text) # can also be blob in case of image/audio..., this should be visualized in the streamlit app
'''
    )
    conversation = gpt_session.get_conversation()
    # The first query primes the conversation; only the refined answer from
    # the follow-up chain-of-thought query below is parsed.
    conversation.query(user_query)
    playground_content_raw = conversation.query(
        f"General rules: " + not_allowed() + chain_of_thought_optimization('python', 'app.py'))
    playground_content = extract_content_from_result(playground_content_raw, 'app.py')
    persist_file(playground_content, os.path.join(executor_path, 'app.py'))
|
|
||||||
|
|
||||||
def get_executor_path(output_path, package, version):
    """Return the versioned folder path for a given package combination.

    The package names are joined with '_' to form a folder, with a
    'v<version>' sub-folder beneath it.
    """
    return os.path.join(output_path, '_'.join(package), f'v{version}')
|
|
||||||
|
|
||||||
def debug_executor(output_path, package, description, test):
    """Iteratively push the executor to hubble and let GPT fix build errors.

    Each failed push produces a new version folder (v2, v3, ...) containing
    the GPT-patched files; the loop stops at the first clean push.

    Args:
        output_path: Root folder with the versioned executor folders.
        package: Package combination identifying the executor variant.
        description: Task description, repeated to GPT for context.
        test: Test scenario, repeated to GPT for context.

    Returns:
        Path of the first executor version that pushed without an error.

    Raises:
        MaxDebugTimeReachedException: When the last allowed iteration still
            produced an error.
    """
    MAX_DEBUGGING_ITERATIONS = 10
    error_before = ''
    # At most MAX_DEBUGGING_ITERATIONS - 1 attempts (i runs 1..9).
    for i in range(1, MAX_DEBUGGING_ITERATIONS):
        previous_executor_path = get_executor_path(output_path, package, i)
        next_executor_path = get_executor_path(output_path, package, i + 1)
        log_hubble = push_executor(previous_executor_path)
        error = process_error_message(log_hubble)
        if error:
            recreate_folder(next_executor_path)
            file_name_to_content = get_all_executor_files_with_content(previous_executor_path)
            all_files_string = files_to_string(file_name_to_content)
            user_query = (
                    f"General rules: " + not_allowed()
                    + 'Here is the description of the task the executor must solve:\n'
                    + description
                    + '\n\nHere is the test scenario the executor must pass:\n'
                    + test
                    + 'Here are all the files I use:\n'
                    + all_files_string
                    # Mention the previously fixed error so GPT does not
                    # reintroduce it.
                    + (('This is an error that is already fixed before:\n'
                        + error_before) if error_before else '')
                    + '\n\nNow, I get the following error:\n'
                    + error + '\n'
                    + 'Think quickly about possible reasons. '
                      'Then output the files that need change. '
                      "Don't output files that don't need change. "
                      "If you output a file, then write the complete file. "
                      "Use the exact same syntax to wrap the code:\n"
                      f"**...**\n"
                      f"```...\n"
                      f"...code...\n"
                      f"```\n\n"
            )
            conversation = gpt_session.get_conversation()
            returned_files_raw = conversation.query(user_query)
            # Merge GPT's changed files over the previous version; files GPT
            # did not output are carried over unchanged.
            for file_name, tag in FILE_AND_TAG_PAIRS:
                updated_file = extract_content_from_result(returned_files_raw, file_name)
                if updated_file:
                    file_name_to_content[file_name] = updated_file

            for file_name, content in file_name_to_content.items():
                persist_file(content, os.path.join(next_executor_path, file_name))
            error_before = error

        else:
            # Clean push: version i is the working executor.
            break
        if i == MAX_DEBUGGING_ITERATIONS - 1:
            raise MaxDebugTimeReachedException('Could not debug the executor.')
    return get_executor_path(output_path, package, i)
|
|
||||||
|
|
||||||
class MaxDebugTimeReachedException(Exception):
    """Raised when the executor could not be fixed within the allowed debugging iterations.

    Inherits from Exception (not BaseException) so it behaves like a normal
    application error and is caught by generic ``except Exception`` handlers;
    BaseException is reserved for system-exiting exceptions such as
    KeyboardInterrupt and SystemExit.
    """
    pass
|
|
||||||
|
|
||||||
|
|
||||||
def generate_executor_name(description):
    """Ask GPT for a camel-case executor class name matching *description*.

    Returns:
        The name extracted from the **name.txt** block of the GPT answer,
        or '' if no such block was returned.
    """
    prompt = f'''
Generate a name for the executor matching the description:
"{description}"
The executor name must fulfill the following criteria:
- camel case
- start with a capital letter
- only consists of lower and upper case characters
- end with Executor.

The output is a the raw string wrapped into ``` and starting with **name.txt** like this:
**name.txt**
```
PDFParserExecutor
```
'''
    chat = gpt_session.get_conversation()
    response = chat.query(prompt)
    return extract_content_from_result(response, 'name.txt')
|
|
||||||
|
|
||||||
def get_possible_packages(description, threads):
    """Ask GPT for candidate python package combinations for the task.

    Args:
        description: Natural-language task description.
        threads: Maximum number of package combinations to return.

    Returns:
        A list of package-name lists, most promising combination first,
        truncated to at most *threads* entries.
    """
    print_colored('', '############# What package to use? #############', 'red')
    user_query = f'''
Here is the task description of the problem you need to solve:
"{description}"
First, write down all the subtasks you need to solve which require python packages.
For each subtask:
Provide a list of 1 to 3 python packages you could use to solve the subtask. Prefer modern packages.
For each package:
Write down some non-obvious thoughts about the challenges you might face for the task and give multiple approaches on how you handle them.
For example, there might be some packages you must not use because they do not obey the rules:
{not_allowed()}
Discuss the pros and cons for all of these packages.
Create a list of package subsets that you could use to solve the task.
The list is sorted in a way that the most promising subset of packages is at the top.
The maximum length of the list is 5.

The output must be a list of lists wrapped into ``` and starting with **packages.csv** like this:
**packages.csv**
```
package1,package2
package2,package3,...
...
```
'''
    conversation = gpt_session.get_conversation()
    packages_raw = conversation.query(user_query)
    packages_csv_string = extract_content_from_result(packages_raw, 'packages.csv')
    # Strip whitespace and skip blank lines so stray newlines in the GPT
    # answer cannot produce an empty package combination like [''].
    packages = [
        [package.strip() for package in line.split(',')]
        for line in packages_csv_string.splitlines()
        if line.strip()
    ]
    return packages[:threads]
|
|
||||||
|
|
||||||
@click.group(invoke_without_command=True)
def main():
    """Top-level CLI group for the gptdeploy commands (create, configure)."""
    pass
|
|
||||||
|
|
||||||
@main.command()
@click.option('--description', required=True, help='Description of the executor.')
@click.option('--test', required=True, help='Test scenario for the executor.')
@click.option('--num_approaches', default=3, type=int, help='Number of num_approaches to use to fulfill the task (default: 3).')
@click.option('--output_path', default='executor', help='Path to the output folder (must be empty). ')
def create(
        description,
        test,
        num_approaches=3,
        output_path='executor',
):
    """Create, debug, deploy and wrap a microservice generated from *description*.

    Tries up to *num_approaches* package combinations; the first combination
    that survives generation and debugging is deployed as a Jina flow and
    gets a streamlit playground.
    """
    jina_auth_login()

    # Random suffix avoids name collisions with previously deployed executors.
    generated_name = generate_executor_name(description)
    executor_name = f'{generated_name}{random.randint(0, 1000_000)}'

    packages_list = get_possible_packages(description, num_approaches)
    recreate_folder(output_path)
    # packages_list = [['a']]
    # executor_name = 'ColorPaletteGeneratorExecutor5946'
    # executor_path = '/Users/florianhonicke/jina/gptdeploy/executor/colorsys_colorharmony/v5'
    # host = 'grpcs://gptdeploy-5f6ea44fc8.wolf.jina.ai'
    for packages in packages_list:
        try:
            create_executor(description, test, output_path, executor_name, packages)
            executor_path = debug_executor(output_path, packages, description, test)
            print('Deploy a jina flow')
            host = jina_cloud.deploy_flow(executor_name, executor_path)
            print(f'Flow is deployed create the playground for {host}')
            create_playground(executor_name, executor_path, host)
        except MaxDebugTimeReachedException:
            # This approach could not be debugged in time — move on to the
            # next package combination.
            print('Could not debug the executor.')
            continue
        print(
            'Executor name:', executor_name, '\n',
            'Executor path:', executor_path, '\n',
            'Host:', host, '\n',
            'Playground:', f'streamlit run {os.path.join(executor_path, "app.py")}', '\n',
        )
        break
|
|
||||||
|
|
||||||
@main.command()
@click.option('--key', required=True, help='Your OpenAI API key.')
def configure(key):
    """Store the OpenAI API key so later commands can authenticate."""
    set_api_key(key)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Allow running this module directly as the CLI entry point.
    main()
|
|
||||||
Reference in New Issue
Block a user