diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index abfc233..85d34e8 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,7 +10,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- group: [0, 1, 2, 3, 4]
+ group: [0, 1, 2, 3, 4, 5_company_logos]
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.8
@@ -28,11 +28,13 @@ jobs:
id: test
run: |
pytest -vs test/integration/test_generator.py::test_generation_level_${{ matrix.group }}
- timeout-minutes: 15
+ timeout-minutes: 17
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SCENEX_API_KEY: ${{ secrets.SCENEX_API_KEY }}
WHISPER_API_KEY: ${{ secrets.WHISPER_API_KEY }}
+ GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
+ GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
test_unit:
runs-on: ubuntu-latest
@@ -58,6 +60,8 @@ jobs:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
SCENEX_API_KEY: ${{ secrets.SCENEX_API_KEY }}
WHISPER_API_KEY: ${{ secrets.WHISPER_API_KEY }}
+ GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
+ GOOGLE_CSE_ID: ${{ secrets.GOOGLE_CSE_ID }}
base-image-push:
runs-on: ubuntu-latest
diff --git a/README.md b/README.md
index 99c1292..e9a29ec 100644
--- a/README.md
+++ b/README.md
@@ -49,13 +49,14 @@ Your imagination is the limit!
-
-
-
+
+
-Welcome to Dev GPT, where we bring your ideas to life with the power of advanced artificial intelligence! Our automated development team is designed to create microservices tailored to your specific needs, making your software development process seamless and efficient. Comprised of a virtual Product Manager, Developer, and DevOps, our AI team ensures that every aspect of your project is covered, from concept to deployment.
+Welcome to Dev-GPT, where we bring your ideas to life with the power of advanced artificial intelligence!
+Our automated development team is designed to create microservices tailored to your specific needs, making your software development process seamless and efficient.
+Composed of a virtual Product Manager, Developer, and DevOps, our AI team ensures that every aspect of your project is covered, from concept to deployment.
## Quickstart
@@ -66,8 +67,13 @@ dev-gpt generate
### Requirements
- OpenAI key with access to gpt-3.5-turbo or gpt-4
+- If you want to enable your microservice to search for web content,
+set the GOOGLE_API_KEY and GOOGLE_CSE_ID environment variables.
+More information can be found [here](https://developers.google.com/custom-search/v1/overview).
```bash
-dev-gpt configure --key
+dev-gpt configure --openai-api-key <your-openai-api-key>
+dev-gpt configure --google-api-key <your-google-api-key>  # optional, only needed for Google search
+dev-gpt configure --google-cse-id <your-google-cse-id>  # optional, only needed for Google search
```
If you set the environment variable `OPENAI_API_KEY`, the configuration step can be skipped.
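+The same applies to `GOOGLE_API_KEY` and `GOOGLE_CSE_ID`. For example, on Linux/macOS (placeholder values):
+```bash
+export GOOGLE_API_KEY=<your-google-api-key>
+export GOOGLE_CSE_ID=<your-custom-search-engine-id>
+```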
diff --git a/dev_gpt/apis/gpt.py b/dev_gpt/apis/gpt.py
index 44d329f..335eab0 100644
--- a/dev_gpt/apis/gpt.py
+++ b/dev_gpt/apis/gpt.py
@@ -16,6 +16,7 @@ from urllib3.exceptions import InvalidChunkLength
from dev_gpt.constants import PRICING_GPT4_PROMPT, PRICING_GPT4_GENERATION, PRICING_GPT3_5_TURBO_PROMPT, \
PRICING_GPT3_5_TURBO_GENERATION, CHARS_PER_TOKEN
+from dev_gpt.options.generate.conversation_logger import ConversationLogger
from dev_gpt.options.generate.templates_system import template_system_message_base
from dev_gpt.utils.string_tools import print_colored, get_template_parameters
@@ -24,7 +25,7 @@ def configure_openai_api_key():
if 'OPENAI_API_KEY' not in os.environ:
print_colored('You need to set OPENAI_API_KEY in your environment.', '''
Run:
-dev-gpt configure --key
+dev-gpt configure --openai-api-key
If you have updated it already, please restart your terminal.
''', 'red')
@@ -41,9 +42,10 @@ class GPTSession:
cls._instance = super(GPTSession, cls).__new__(cls)
return cls._instance
- def __init__(self, model: str = 'gpt-4', ):
+    def __init__(self, log_file_path: str, model: str = 'gpt-4'):
if GPTSession._initialized:
return
+ self.conversation_logger = ConversationLogger(log_file_path)
if model == 'gpt-4' and self.is_gpt4_available():
self.pricing_prompt = PRICING_GPT4_PROMPT
self.pricing_generation = PRICING_GPT4_GENERATION
@@ -58,10 +60,13 @@ class GPTSession:
self.chars_generation_so_far = 0
GPTSession._initialized = True
+
def get_conversation(self, messages: List[BaseMessage] = [], print_stream: bool = True, print_costs: bool = True):
messages = deepcopy(messages)
return _GPTConversation(
- self.model_name, self.cost_callback, messages, print_stream, print_costs
+ self.model_name, self.cost_callback, messages, print_stream, print_costs, self.conversation_logger
)
@staticmethod
@@ -107,7 +112,7 @@ class AssistantStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
class _GPTConversation:
- def __init__(self, model: str, cost_callback, messages: List[BaseMessage], print_stream, print_costs):
+ def __init__(self, model: str, cost_callback, messages: List[BaseMessage], print_stream, print_costs, conversation_logger: ConversationLogger = None):
self._chat = ChatOpenAI(
model_name=model,
streaming=True,
@@ -119,6 +124,7 @@ class _GPTConversation:
self.messages = messages
self.print_stream = print_stream
self.print_costs = print_costs
+ self.conversation_logger = conversation_logger
def print_messages(self, messages):
for i, message in enumerate(messages):
@@ -141,6 +147,7 @@ class _GPTConversation:
for i in range(10):
try:
response = self._chat(self.messages)
+                if self.conversation_logger is not None:
+                    self.conversation_logger.log(self.messages, response)
break
except (ConnectionError, InvalidChunkLength, ChunkedEncodingError) as e:
print('There was a connection error. Retrying...')
@@ -173,7 +180,7 @@ def ask_gpt(prompt_template, parser, **kwargs):
if isinstance(value, dict):
kwargs[key] = json.dumps(value, indent=4)
prompt = prompt_template.format(**kwargs)
- conversation = GPTSession().get_conversation(
+ conversation = GPTSession._instance.get_conversation(
[],
print_stream=os.environ['VERBOSE'].lower() == 'true',
print_costs=False
diff --git a/dev_gpt/apis/jina_cloud.py b/dev_gpt/apis/jina_cloud.py
index c41b0fb..ac0953b 100644
--- a/dev_gpt/apis/jina_cloud.py
+++ b/dev_gpt/apis/jina_cloud.py
@@ -98,11 +98,16 @@ def _push_executor(dir_path):
'public': 'True',
'private': 'False',
'verbose': 'True',
- 'buildEnv': f'{{"OPENAI_API_KEY": "{os.environ["OPENAI_API_KEY"]}"}}',
+ 'buildEnv': f'{{"OPENAI_API_KEY": "{os.environ["OPENAI_API_KEY"]}", "GOOGLE_API_KEY": "{os.environ.get("GOOGLE_API_KEY","")}", "GOOGLE_CSE_ID": "{os.environ.get("GOOGLE_CSE_ID","")}"}}',
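+            # the Google credentials default to empty strings so the build still succeeds when search is not configured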
'md5sum': md5_digest,
}
with suppress_stdout():
headers = get_request_header()
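+        # hard-coded platform metadata attached to the push request headers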
+ headers['jinameta-platform'] = 'Darwin'
+ headers['jinameta-platform-release'] = '21.1.0'
+ headers['jinameta-platform-version'] = 'Darwin Kernel Version 21.1.0: Wed Oct 13 17:33:23 PDT 2021; root:xnu-8019.41.5~1/RELEASE_X86_64'
+ headers['jinameta-architecture'] = 'x86_64'
+ headers['jinameta-processor'] = 'i386'
resp = upload_file(
'https://api.hubble.jina.ai/v2/rpc/executor.push',
@@ -251,7 +256,9 @@ executors:
uses: {prefix}://{get_user_name(DEMO_TOKEN)}/{executor_name}:latest
{"" if use_docker else "install-requirements: True"}
env:
- OPENAI_API_KEY: {os.environ['OPENAI_API_KEY']}
+ OPENAI_API_KEY: ${{{{ ENV.OPENAI_API_KEY }}}}
+ GOOGLE_API_KEY: ${{{{ ENV.GOOGLE_API_KEY }}}}
+ GOOGLE_CSE_ID: ${{{{ ENV.GOOGLE_CSE_ID }}}}
jcloud:
resources:
instance: C2
diff --git a/dev_gpt/cli.py b/dev_gpt/cli.py
index 08d2ada..094bcbd 100644
--- a/dev_gpt/cli.py
+++ b/dev_gpt/cli.py
@@ -92,9 +92,16 @@ def deploy(path):
Deployer().deploy(path)
@main.command()
-@click.option('--key', required=True, help='Your OpenAI API key.')
-def configure(key):
- set_api_key(key)
+@click.option('--openai-api-key', default=None, help='Your OpenAI API key.')
+@click.option('--google-api-key', default=None, help='Your Google API key.')
+@click.option('--google-cse-id', default=None, help='Your Google CSE ID.')
+def configure(openai_api_key, google_api_key, google_cse_id):
+ if openai_api_key:
+ set_api_key('OPENAI_API_KEY', openai_api_key)
+ if google_api_key:
+ set_api_key('GOOGLE_API_KEY', google_api_key)
+ if google_cse_id:
+ set_api_key('GOOGLE_CSE_ID', google_cse_id)
if __name__ == '__main__':
diff --git a/dev_gpt/constants.py b/dev_gpt/constants.py
index bb5910b..facfeaf 100644
--- a/dev_gpt/constants.py
+++ b/dev_gpt/constants.py
@@ -26,6 +26,12 @@ FILE_AND_TAG_PAIRS = [
(STREAMLIT_FILE_NAME, STREAMLIT_FILE_TAG)
]
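+# maps code substrings to the import statement they imply; the generator prepends any that are missing from generated files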
+INDICATOR_TO_IMPORT_STATEMENT = {
+ 'io.BytesIO': 'import io',
+ 'BytesIO': 'from io import BytesIO',
+ 'base64': 'import base64',
+}
+
FLOW_URL_PLACEHOLDER = 'jcloud.jina.ai'
PRICING_GPT4_PROMPT = 0.03
@@ -43,15 +49,20 @@ DEMO_TOKEN = '45372338e04f5a41af949024db929d46'
BLACKLISTED_PACKAGES = [
'moderngl', 'pyopengl', 'pyglet', 'pythreejs', 'panda3d', # because they need a screen,
'tika', # because it needs java
+ 'clearbit' # because of installation issues on latest version
]
UNNECESSARY_PACKAGES = [
'fastapi', 'uvicorn', 'starlette' # because the wrappers are used instead
]
LANGUAGE_PACKAGES = [
- 'allennlp', 'bertopic', 'fasttext', 'flair', 'gensim', 'nltk', 'openai',
+    'allennlp', 'bertopic', 'gpt-3', 'fasttext', 'flair', 'gensim', 'nltk', 'openai',
'pattern', 'polyglot', 'pytorch-transformers', 'rasa', 'sentence-transformers',
'spacy', 'stanza', 'summarizer', 'sumy', 'textblob', 'textstat', 'transformers',
'vadersentiment'
]
+SEARCH_PACKAGES = [
+ 'googlesearch-python', 'google', 'googlesearch', 'google-api-python-client', 'pygooglenews', 'google-cloud'
+]
+
diff --git a/dev_gpt/options/configure/key_handling.py b/dev_gpt/options/configure/key_handling.py
index a2f8576..1012fab 100644
--- a/dev_gpt/options/configure/key_handling.py
+++ b/dev_gpt/options/configure/key_handling.py
@@ -40,26 +40,26 @@ def get_shell():
return None
-def get_shell_config(key):
+def get_shell_config(name, key):
return {
- "bash": {"config_file": "~/.bashrc", "export_line": f"export OPENAI_API_KEY={key}"},
- "zsh": {"config_file": "~/.zshrc", "export_line": f"export OPENAI_API_KEY={key}"},
- "sh": {"config_file": "~/.profile", "export_line": f"export OPENAI_API_KEY={key}"},
+ "bash": {"config_file": "~/.bashrc", "export_line": f"export {name}={key}"},
+ "zsh": {"config_file": "~/.zshrc", "export_line": f"export {name}={key}"},
+ "sh": {"config_file": "~/.profile", "export_line": f"export {name}={key}"},
"fish": {
"config_file": "~/.config/fish/config.fish",
- "export_line": f"set -gx OPENAI_API_KEY {key}",
+ "export_line": f"set -gx {name} {key}",
},
- "csh": {"config_file": "~/.cshrc", "export_line": f"setenv OPENAI_API_KEY {key}"},
- "tcsh": {"config_file": "~/.tcshrc", "export_line": f"setenv OPENAI_API_KEY {key}"},
- "ksh": {"config_file": "~/.kshrc", "export_line": f"export OPENAI_API_KEY={key}"},
- "dash": {"config_file": "~/.profile", "export_line": f"export OPENAI_API_KEY={key}"}
+ "csh": {"config_file": "~/.cshrc", "export_line": f"setenv {name} {key}"},
+ "tcsh": {"config_file": "~/.tcshrc", "export_line": f"setenv {name} {key}"},
+ "ksh": {"config_file": "~/.kshrc", "export_line": f"export {name}={key}"},
+ "dash": {"config_file": "~/.profile", "export_line": f"export {name}={key}"}
}
-def set_env_variable(shell, key):
- shell_config = get_shell_config(key)
+def set_env_variable(shell, name, key):
+ shell_config = get_shell_config(name, key)
if shell not in shell_config:
- click.echo("Sorry, your shell is not supported. Please add the key OPENAI_API_KEY manually.")
+ click.echo(f"Sorry, your shell is not supported. Please add the key {name} manually.")
return
config_file = os.path.expanduser(shell_config[shell]["config_file"])
@@ -71,8 +71,8 @@ def set_env_variable(shell, key):
export_line = shell_config[shell]['export_line']
# Update the existing API key if it exists, otherwise append it to the config file
- if f"OPENAI_API_KEY" in content:
- content = re.sub(r'OPENAI_API_KEY=.*', f'OPENAI_API_KEY={key}', content, flags=re.MULTILINE)
+    if name in content:
+ content = re.sub(rf'{name}=.*', f'{name}={key}', content, flags=re.MULTILINE)
with open(config_file, "w", encoding='utf-8') as file:
file.write(content)
@@ -81,7 +81,7 @@ def set_env_variable(shell, key):
file.write(f"\n{export_line}\n")
click.echo(f'''
-✅ Success, OPENAI_API_KEY has been set in {config_file}.
+✅ Success, {name} has been set in {config_file}.
Please restart your shell to apply the changes or run:
source {config_file}
'''
@@ -91,21 +91,21 @@ source {config_file}
click.echo(f"Error: {config_file} not found. Please set the environment variable manually.")
-def set_api_key(key):
+def set_api_key(name, key):
system_platform = platform.system().lower()
if system_platform == "windows":
- set_env_variable_command = f'setx OPENAI_API_KEY "{key}"'
+ set_env_variable_command = f'setx {name} "{key}"'
subprocess.call(set_env_variable_command, shell=True)
- click.echo('''
-✅ Success, OPENAI_API_KEY has been set.
+ click.echo(f'''
+✅ Success, {name} has been set.
Please restart your Command Prompt to apply the changes.
'''
)
elif system_platform in ["linux", "darwin"]:
- if "OPENAI_API_KEY" in os.environ or is_key_set_in_config_file(key):
- if not click.confirm("OPENAI_API_KEY is already set. Do you want to overwrite it?"):
+        if name in os.environ or is_key_set_in_config_file(name, key):
+ if not click.confirm(f"{name} is already set. Do you want to overwrite it?"):
click.echo("Aborted.")
return
@@ -115,24 +115,24 @@ Please restart your Command Prompt to apply the changes.
"Error: Unable to detect your shell or psutil is not available. Please set the environment variable manually.")
return
- set_env_variable(shell, key)
+ set_env_variable(shell, name, key)
else:
click.echo("Sorry, this platform is not supported.")
-def is_key_set_in_config_file(key):
+def is_key_set_in_config_file(name, key):
shell = get_shell()
if shell is None:
return False
- shell_config = get_shell_config(key)
+ shell_config = get_shell_config(name, key)
config_file = os.path.expanduser(shell_config[shell]["config_file"])
try:
with open(config_file, "r", encoding='utf-8') as file:
content = file.read()
- if f"OPENAI_API_KEY" in content:
+            if name in content:
return True
except FileNotFoundError:
pass
diff --git a/dev_gpt/options/generate/chains/auto_refine_description.py b/dev_gpt/options/generate/chains/auto_refine_description.py
index 09e9818..085295a 100644
--- a/dev_gpt/options/generate/chains/auto_refine_description.py
+++ b/dev_gpt/options/generate/chains/auto_refine_description.py
@@ -3,7 +3,7 @@ import json
from dev_gpt.apis.gpt import ask_gpt
from dev_gpt.options.generate.parser import identity_parser
from dev_gpt.options.generate.prompt_factory import context_to_string
-
+from dev_gpt.options.generate.tools.tools import get_available_tools
def auto_refine_description(context):
@@ -36,7 +36,9 @@ def auto_refine_description(context):
better_description_prompt = f'''{{context_string}}
Update the description of the Microservice to make it more precise without adding or removing information.
Note: the output must be a list of tasks the Microservice has to perform.
-Example for the description: "return the average temperature of the 5 days weather forecast for a given location."
+Note: you can use two tools if necessary:
+{get_available_tools()}
+Example for the description: "return a description of the average temperature of the 5 days weather forecast for a given location."
1. get the 5 days weather forcast from the https://openweathermap.org/ API
2. extract the temperature from the response
3. calculate the average temperature'''
@@ -47,7 +49,8 @@ Note: If you are not sure about the details, then come up with the minimal numbe
generate_output_schema_prompt = '''{context_string}
Generate the lean response json schema for the Microservice.
-Note: If you are not sure about the details, then come up with the minimal number of parameters possible.'''
+Note: If you are not sure about the details, then come up with the minimal number of parameters possible.
+Note: If you can choose between returning files as URLs or as base64-encoded strings, choose base64-encoded strings.
summarize_description_and_schemas_prompt = '''{context_string}
Write an updated microservice description by incorporating information about the request and response parameters in a concise way without losing any information.
diff --git a/dev_gpt/options/generate/chains/question_answering.py b/dev_gpt/options/generate/chains/question_answering.py
index 2f191ce..eca3c35 100644
--- a/dev_gpt/options/generate/chains/question_answering.py
+++ b/dev_gpt/options/generate/chains/question_answering.py
@@ -1,25 +1,46 @@
from dev_gpt.apis.gpt import ask_gpt
-from dev_gpt.options.generate.parser import boolean_parser
+from dev_gpt.options.generate.parser import boolean_parser, identity_parser
+
def is_question_true(question):
def fn(text):
return answer_yes_no_question(text, question)
+
return fn
+
def is_question_false(question):
return lambda context: not is_question_true(question)(context)
def answer_yes_no_question(text, question):
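+    # two-step prompting: first gather short arguments for both answers, then ask for the final yes/no verdict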
- prompt = question_prompt.format(
- question=question,
- text=text
+ pros_and_cons = ask_gpt(
+ pros_and_cons_prompt.format(
+ question=question,
+ text=text,
+ ),
+ identity_parser,
)
- return ask_gpt(prompt, boolean_parser)
+
+ return ask_gpt(
+ question_prompt.format(
+ text=text,
+ question=question,
+ pros_and_cons=pros_and_cons,
+ ),
+ boolean_parser)
+
+pros_and_cons_prompt = '''\
+# Context
+{text}
+# Question
+{question}
+Note: You must not answer the question. Instead, give up to 5 bullet points (10 words each) arguing why the question should be answered with true or false.'''
question_prompt = '''\
+# Context
{text}
+# Question
{question}
Note: You must answer "yes" or "no".
'''
-
diff --git a/dev_gpt/options/generate/conversation_logger.py b/dev_gpt/options/generate/conversation_logger.py
new file mode 100644
index 0000000..cbb3577
--- /dev/null
+++ b/dev_gpt/options/generate/conversation_logger.py
@@ -0,0 +1,28 @@
+import json
+from typing import List
+
+from langchain.schema import BaseMessage
+
+
+class ConversationLogger:
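+    """Collects every prompt/response pair and rewrites them to disk as pretty-printed JSON after each call."""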
+ def __init__(self, log_file_path):
+ self.log_file_path = log_file_path
+ self.log_file = []
+
+ def log(self, prompt_message_list: List[BaseMessage], response: str):
+ prompt_list_json = [
+ {
+ 'role': f'{message.type}',
+ 'content': f'{message.content}'
+ }
+ for message in prompt_message_list
+ ]
+ self.log_file.append({
+ 'prompt': prompt_list_json,
+ 'response': f'{response}'
+ })
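+        # rewrite the whole log on every call so the file on disk is always a complete JSON document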
+        with open(self.log_file_path, 'w', encoding='utf-8') as f:
+ f.write(json.dumps(self.log_file, indent=2))
diff --git a/dev_gpt/options/generate/generator.py b/dev_gpt/options/generate/generator.py
index b3f72c4..ee83f86 100644
--- a/dev_gpt/options/generate/generator.py
+++ b/dev_gpt/options/generate/generator.py
@@ -8,6 +8,7 @@ from typing import Callable
from typing import List, Text, Optional
from langchain import PromptTemplate
+from langchain.schema import SystemMessage, AIMessage
from pydantic.dataclasses import dataclass
from dev_gpt.apis import gpt
@@ -17,15 +18,19 @@ from dev_gpt.apis.pypi import is_package_on_pypi, clean_requirements_txt
from dev_gpt.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES, MAX_DEBUGGING_ITERATIONS, \
BLACKLISTED_PACKAGES, EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, IMPLEMENTATION_FILE_NAME, \
- IMPLEMENTATION_FILE_TAG, LANGUAGE_PACKAGES, UNNECESSARY_PACKAGES, DOCKER_BASE_IMAGE_VERSION
+ IMPLEMENTATION_FILE_TAG, LANGUAGE_PACKAGES, UNNECESSARY_PACKAGES, DOCKER_BASE_IMAGE_VERSION, SEARCH_PACKAGES, \
+ INDICATOR_TO_IMPORT_STATEMENT
from dev_gpt.options.generate.pm.pm import PM
from dev_gpt.options.generate.templates_user import template_generate_microservice_name, \
template_generate_possible_packages, \
- template_solve_code_issue, \
+ template_implement_solution_code_issue, \
template_solve_pip_dependency_issue, template_is_dependency_issue, template_generate_playground, \
- template_generate_function, template_generate_test, template_generate_requirements, \
+ template_generate_function_constructor, template_generate_test, template_generate_requirements, \
template_chain_of_thought, template_summarize_error, \
- template_solve_apt_get_dependency_issue
+ template_solve_apt_get_dependency_issue, \
+ template_suggest_solutions_code_issue, template_was_error_seen_before, \
+ template_was_solution_tried_before, response_format_was_error_seen_before, \
+ response_format_was_solution_tried_before, response_format_suggest_solutions
from dev_gpt.utils.io import persist_file, get_all_microservice_files_with_content, get_microservice_path
from dev_gpt.utils.string_tools import print_colored
@@ -37,10 +42,16 @@ class TaskSpecification:
class Generator:
- def __init__(self, task_description, path, model='gpt-4'):
- self.gpt_session = gpt.GPTSession(model=model)
+ def __init__(self, task_description, path, model='gpt-4', self_healing=True):
+ self.gpt_session = gpt.GPTSession(os.path.join(path, 'log.json'), model=model)
self.microservice_specification = TaskSpecification(task=task_description, test=None)
+ self.self_healing = self_healing
self.microservice_root_path = path
+ self.microservice_name = None
+ self.previous_microservice_path = None
+ self.cur_microservice_path = None
+ self.previous_errors = []
+ self.previous_solutions = []
@staticmethod
def extract_content_from_result(plain_text, file_name, match_single_block=False, can_contain_code_block=True):
@@ -89,9 +100,12 @@ metas:
self,
section_title: str,
template: PromptTemplate,
- destination_folder: str,
+ destination_folder: str = None,
file_name_s: List[str] = None,
parse_result_fn: Callable = None,
+ use_custom_system_message: bool = True,
+ response_format_example: str = None,
+ post_process_fn: Callable = None,
**template_kwargs
):
"""This function generates file(s) using the given template and persists it/them in the given destination folder.
@@ -100,22 +114,32 @@ metas:
Args:
section_title (str): The title of the section to be printed in the console.
template (PromptTemplate): The template to be used for generating the file(s).
- destination_folder (str): The destination folder where the generated file(s) should be persisted.
+ destination_folder (str): The destination folder where the generated file(s) should be persisted. If None,
+ the current microservice path is used. Defaults to None.
file_name_s (List[str], optional): The name of the file(s) to be generated. Defaults to None.
parse_result_fn (Callable, optional): A function that parses the generated content and returns a dictionary
mapping file_name to its content. If no content could be extracted, it returns an empty dictionary.
Defaults to None. If None, default parsing is used which uses the file_name to extract from the generated content.
+ use_custom_system_message (bool, optional): whether to use custom system message or not. Defaults to True.
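+            response_format_example (str, optional): example of the expected file wrapping, shown to the model
+                when its first response could not be parsed. Defaults to None.
+            post_process_fn (Callable, optional): applied to the parsed content dict before persisting,
+                e.g. to add missing imports. Defaults to None.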
**template_kwargs: The keyword arguments to be passed to the template.
"""
+ if destination_folder is None:
+ destination_folder = self.cur_microservice_path
+
if parse_result_fn is None:
parse_result_fn = self.get_default_parse_result_fn(file_name_s)
print_colored('', f'\n\n############# {section_title} #############', 'blue')
- system_introduction_message = _GPTConversation._create_system_message(
- self.microservice_specification.task,
- self.microservice_specification.test
+ if use_custom_system_message:
+ system_introduction_message = _GPTConversation._create_system_message(
+ self.microservice_specification.task,
+ self.microservice_specification.test
+ )
+ else:
+ system_introduction_message = SystemMessage(content='You are a helpful assistant.')
+        conversation = self.gpt_session.get_conversation(
+            messages=[system_introduction_message]
+        )
- conversation = self.gpt_session.get_conversation(messages=[system_introduction_message])
template_kwargs = {k: v for k, v in template_kwargs.items() if k in template.input_variables}
if 'file_name' in template.input_variables and len(file_name_s) == 1:
template_kwargs['file_name'] = file_name_s[0]
@@ -125,9 +149,36 @@ metas:
)
)
content = parse_result_fn(content_raw)
+ if post_process_fn is not None:
+ content = post_process_fn(content)
if content == {}:
+ conversation = self.gpt_session.get_conversation(
+ messages=[SystemMessage(content='You are a helpful assistant.'), AIMessage(content=content_raw)]
+ )
+ if response_format_example is not None:
+ file_wrapping_example = response_format_example
+ elif len(file_name_s) == 1:
+ file_ending = file_name_s[0].split('.')[-1]
+ if file_ending == 'py':
+ tag = 'python'
+ elif file_ending == 'json':
+ tag = 'json'
+ else:
+ tag = ''
+ file_wrapping_example = f'''**{file_name_s[0]}**
+```{tag}
+
+```'''
+ else:
+ file_wrapping_example = '''**file_name.file_ending**
+```
+```'''
content_raw = conversation.chat(
- 'You must add the content in the format shown above' + (f' for {file_name_s[0]}' if len(file_name_s) == 1 else ''))
+ 'Based on your previous response, only output the content' + (f' for `{file_name_s[0]}`' if len(file_name_s) == 1 else '') +
+ '. Like this:\n' +
+ file_wrapping_example
+ )
content = parse_result_fn(content_raw)
for _file_name, _file_content in content.items():
persist_file(_file_content, os.path.join(destination_folder, _file_name))
@@ -135,52 +186,54 @@ metas:
def generate_microservice(
self,
- microservice_name,
packages,
num_approach,
):
- MICROSERVICE_FOLDER_v1 = get_microservice_path(self.microservice_root_path, microservice_name, packages,
- num_approach, 1)
- os.makedirs(MICROSERVICE_FOLDER_v1)
+ self.cur_microservice_path = get_microservice_path(
+ self.microservice_root_path, self.microservice_name, packages, num_approach, 1
+ )
+ os.makedirs(self.cur_microservice_path)
with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'jina_wrapper.py'), 'r', encoding='utf-8') as f:
microservice_executor_boilerplate = f.read()
- microservice_executor_code = microservice_executor_boilerplate.replace('class DevGPTExecutor(Executor):',
- f'class {microservice_name}(Executor):')
- persist_file(microservice_executor_code, os.path.join(MICROSERVICE_FOLDER_v1, EXECUTOR_FILE_NAME))
+ microservice_executor_code = microservice_executor_boilerplate \
+ .replace('class DevGPTExecutor(Executor):', f'class {self.microservice_name}(Executor):')
+ persist_file(microservice_executor_code, os.path.join(self.cur_microservice_path, EXECUTOR_FILE_NAME))
- with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'apis.py'), 'r', encoding='utf-8') as f:
- persist_file(f.read(), os.path.join(MICROSERVICE_FOLDER_v1, 'apis.py'))
+ for additional_file in ['google_custom_search.py', 'gpt_3_5_turbo.py']:
+ with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', additional_file), 'r', encoding='utf-8') as f:
+ persist_file(f.read(), os.path.join(self.cur_microservice_path, additional_file))
+ is_using_gpt_3_5_turbo = 'gpt_3_5_turbo' in packages or 'gpt-3-5-turbo' in packages
+ is_using_google_custom_search = 'google_custom_search' in packages or 'google-custom-search' in packages
microservice_content = self.generate_and_persist_file(
section_title='Microservice',
- template=template_generate_function,
- destination_folder=MICROSERVICE_FOLDER_v1,
+ template=template_generate_function_constructor(is_using_gpt_3_5_turbo, is_using_google_custom_search),
microservice_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
packages=packages,
file_name_purpose=IMPLEMENTATION_FILE_NAME,
tag_name=IMPLEMENTATION_FILE_TAG,
file_name_s=[IMPLEMENTATION_FILE_NAME],
+ post_process_fn=self.add_missing_imports_post_process_fn,
)[IMPLEMENTATION_FILE_NAME]
test_microservice_content = self.generate_and_persist_file(
'Test Microservice',
template_generate_test,
- MICROSERVICE_FOLDER_v1,
code_files_wrapped=self.files_to_string({EXECUTOR_FILE_NAME: microservice_content}),
- microservice_name=microservice_name,
+ microservice_name=self.microservice_name,
microservice_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
file_name_purpose=TEST_EXECUTOR_FILE_NAME,
tag_name=TEST_EXECUTOR_FILE_TAG,
file_name_s=[TEST_EXECUTOR_FILE_NAME],
+ post_process_fn=self.add_missing_imports_post_process_fn,
)[TEST_EXECUTOR_FILE_NAME]
- requirements_content = self.generate_and_persist_file(
+ self.generate_and_persist_file(
'Requirements',
template_generate_requirements,
- MICROSERVICE_FOLDER_v1,
code_files_wrapped=self.files_to_string({
IMPLEMENTATION_FILE_NAME: microservice_content,
TEST_EXECUTOR_FILE_NAME: test_microservice_content,
@@ -189,21 +242,7 @@ metas:
file_name_s=[REQUIREMENTS_FILE_NAME],
parse_result_fn=self.parse_result_fn_requirements,
tag_name=REQUIREMENTS_FILE_TAG,
- )[REQUIREMENTS_FILE_NAME]
-
- # I deactivated this because 3.5-turbo was hallucinating packages that were not needed
- # now, in the first iteration the default dockerfile is used
- # self.generate_and_persist_file(
- # section_title='Generate Dockerfile',
- # template=template_generate_apt_get_install,
- # destination_folder=MICROSERVICE_FOLDER_v1,
- # file_name_s=None,
- # parse_result_fn=self.parse_result_fn_dockerfile,
- # docker_file_wrapped=self.read_docker_template(),
- # requirements_file_wrapped=self.files_to_string({
- # REQUIREMENTS_FILE_NAME: requirements_content,
- # })
- # )
+ )
with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'Dockerfile'), 'r',
encoding='utf-8') as f:
@@ -212,13 +251,26 @@ metas:
line.replace('{{APT_GET_PACKAGES}}', '').replace('{{DOCKER_BASE_IMAGE_VERSION}}', DOCKER_BASE_IMAGE_VERSION)
for line in docker_file_template_lines
]
- docker_file_content = '\n'.join(docker_file_template_lines)
- persist_file(docker_file_content, os.path.join(MICROSERVICE_FOLDER_v1, 'Dockerfile'))
+ docker_file_content = ''.join(docker_file_template_lines)
+ persist_file(docker_file_content, os.path.join(self.cur_microservice_path, 'Dockerfile'))
- self.write_config_yml(microservice_name, MICROSERVICE_FOLDER_v1)
+ self.write_config_yml(self.microservice_name, self.cur_microservice_path)
print('\nFirst version of the microservice generated. Start iterating on it to make the tests pass...')
+
+ def add_missing_imports_post_process_fn(self, content_dict: dict):
+ for file_name, file_content in content_dict.items():
+ file_content = self.add_missing_imports_for_file(file_content)
+ content_dict[file_name] = file_content
+ return content_dict
+
+ def add_missing_imports_for_file(self, file_content):
+ for indicator, import_statement in INDICATOR_TO_IMPORT_STATEMENT.items():
+ if indicator in file_content and import_statement not in file_content:
+ file_content = f'{import_statement}\n{file_content}'
+ return file_content
+
@staticmethod
def read_docker_template():
with open(os.path.join(os.path.dirname(__file__), 'static_files', 'microservice', 'Dockerfile'), 'r', encoding='utf-8') as f:
@@ -245,15 +297,18 @@ pytest
{os.linesep.join(lines)}'''
return {REQUIREMENTS_FILE_NAME: content_modified}
- def generate_playground(self, microservice_name, microservice_path):
+ def generate_playground(self):
print_colored('', '\n\n############# Playground #############', 'blue')
- file_name_to_content = get_all_microservice_files_with_content(microservice_path)
+ with open(os.path.join(os.path.dirname(__file__), 'static_files', 'gateway', 'app_template.py'), 'r', encoding='utf-8') as f:
+ playground_template = f.read()
+ file_name_to_content = get_all_microservice_files_with_content(self.cur_microservice_path)
conversation = self.gpt_session.get_conversation()
conversation.chat(
template_generate_playground.format(
- code_files_wrapped=self.files_to_string(file_name_to_content, ['test_microservice.py']),
- microservice_name=microservice_name,
+ code_files_wrapped=self.files_to_string(file_name_to_content, ['test_microservice.py', 'microservice.py']),
+ microservice_name=self.microservice_name,
+ playground_template=playground_template,
)
)
playground_content_raw = conversation.chat(
@@ -269,13 +324,14 @@ pytest
playground_content = self.extract_content_from_result(
content_raw, 'app.py', match_single_block=True
)
+ playground_content = self.add_missing_imports_for_file(playground_content)
- gateway_path = os.path.join(microservice_path, 'gateway')
+ gateway_path = os.path.join(self.cur_microservice_path, 'gateway')
shutil.copytree(os.path.join(os.path.dirname(__file__), 'static_files', 'gateway'), gateway_path)
persist_file(playground_content, os.path.join(gateway_path, 'app.py'))
# fill-in name of microservice
- gateway_name = f'Gateway{microservice_name}'
+ gateway_name = f'Gateway{self.microservice_name}'
custom_gateway_path = os.path.join(gateway_path, 'custom_gateway.py')
with open(custom_gateway_path, 'r', encoding='utf-8') as f:
custom_gateway_content = f.read()
@@ -293,40 +349,41 @@ pytest
print('Final step...')
hubble_log = push_executor(gateway_path)
if not is_executor_in_hub(gateway_name):
- raise Exception(f'{microservice_name} not in hub. Hubble logs: {hubble_log}')
+ raise Exception(f'{self.microservice_name} not in hub. Hubble logs: {hubble_log}')
- def debug_microservice(self, microservice_name, num_approach, packages):
+ def debug_microservice(self, num_approach, packages, self_healing):
for i in range(1, MAX_DEBUGGING_ITERATIONS):
print('Debugging iteration', i)
print('Trying to debug the microservice. Might take a while...')
- previous_microservice_path = get_microservice_path(self.microservice_root_path, microservice_name, packages,
- num_approach, i)
- next_microservice_path = get_microservice_path(self.microservice_root_path, microservice_name, packages,
- num_approach, i + 1)
- clean_requirements_txt(previous_microservice_path)
- log_hubble = push_executor(previous_microservice_path)
+ clean_requirements_txt(self.cur_microservice_path)
+ log_hubble = push_executor(self.cur_microservice_path)
error = process_error_message(log_hubble)
if error:
+ if not self_healing:
+ print(error)
+ raise Exception('Self-healing is disabled. Please fix the error manually.')
print('An error occurred during the build process. Feeding the error back to the assistant...')
- self.do_debug_iteration(error, next_microservice_path, previous_microservice_path)
+ self.previous_microservice_path = self.cur_microservice_path
+ self.cur_microservice_path = get_microservice_path(
+ self.microservice_root_path, self.microservice_name, packages, num_approach, i + 1
+ )
+ os.makedirs(self.cur_microservice_path)
+ self.do_debug_iteration(error)
if i == MAX_DEBUGGING_ITERATIONS - 1:
raise self.MaxDebugTimeReachedException('Could not debug the microservice.')
else:
# at the moment, there can be cases where no error log is extracted but the executor is still not published
# it leads to problems later on when someone tries a run or deployment
- if is_executor_in_hub(microservice_name):
+ if is_executor_in_hub(self.microservice_name):
print('Successfully build microservice.')
break
else:
- raise Exception(f'{microservice_name} not in hub. Hubble logs: {log_hubble}')
+ raise Exception(f'{self.microservice_name} not in hub. Hubble logs: {log_hubble}')
- return get_microservice_path(self.microservice_root_path, microservice_name, packages, num_approach, i)
-
- def do_debug_iteration(self, error, next_microservice_path, previous_microservice_path):
- os.makedirs(next_microservice_path)
- file_name_to_content = get_all_microservice_files_with_content(previous_microservice_path)
+ def do_debug_iteration(self, error):
+ file_name_to_content = get_all_microservice_files_with_content(self.previous_microservice_path)
for file_name, content in file_name_to_content.items():
- persist_file(content, os.path.join(next_microservice_path, file_name))
+ persist_file(content, os.path.join(self.cur_microservice_path, file_name))
summarized_error = self.summarize_error(error)
dock_req_string = self.files_to_string({
@@ -339,7 +396,6 @@ pytest
self.generate_and_persist_file(
section_title='Debugging apt-get dependency issue',
template=template_solve_apt_get_dependency_issue,
- destination_folder=next_microservice_path,
file_name_s=['apt-get-packages.json'],
parse_result_fn=self.parse_result_fn_dockerfile,
summarized_error=summarized_error,
@@ -352,24 +408,86 @@ pytest
self.generate_and_persist_file(
section_title='Debugging pip dependency issue',
template=template_solve_pip_dependency_issue,
- destination_folder=next_microservice_path,
file_name_s=[REQUIREMENTS_FILE_NAME],
summarized_error=summarized_error,
all_files_string=dock_req_string,
)
else:
+ all_files_string = self.files_to_string(
+ {key: val for key, val in file_name_to_content.items() if key != EXECUTOR_FILE_NAME}
+ )
+
+ suggested_solution = self.generate_solution_suggestion(summarized_error, all_files_string)
+
self.generate_and_persist_file(
- section_title='Debugging code issue',
- template=template_solve_code_issue,
- destination_folder=next_microservice_path,
+ section_title='Implementing suggestion solution for code issue',
+ template=template_implement_solution_code_issue,
file_name_s=[IMPLEMENTATION_FILE_NAME, TEST_EXECUTOR_FILE_NAME, REQUIREMENTS_FILE_NAME],
summarized_error=summarized_error,
task_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
- all_files_string=self.files_to_string(
- {key: val for key, val in file_name_to_content.items() if key != EXECUTOR_FILE_NAME}),
+ all_files_string=all_files_string,
+ suggested_solution=suggested_solution,
)
+ self.previous_errors.append(summarized_error)
+ self.previous_solutions.append(suggested_solution)
+
+ def generate_solution_suggestion(self, summarized_error, all_files_string):
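+        # ask the model for ranked candidate solutions; if this error was seen before, pick the first candidate that was not already tried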
+ suggested_solutions = json.loads(
+ self.generate_and_persist_file(
+ section_title='Suggest solution for code issue',
+ template=template_suggest_solutions_code_issue,
+ file_name_s=['solutions.json'],
+ summarized_error=summarized_error,
+ task_description=self.microservice_specification.task,
+ test_description=self.microservice_specification.test,
+ all_files_string=all_files_string,
+ response_format_example=response_format_suggest_solutions,
+ )['solutions.json']
+ )
+
+ if len(self.previous_errors) > 0:
+ was_error_seen_before = json.loads(
+ self.generate_and_persist_file(
+ section_title='Check if error was seen before',
+ template=template_was_error_seen_before,
+ file_name_s=['was_error_seen_before.json'],
+ summarized_error=summarized_error,
+ previous_errors='- "' + f'"{os.linesep}- "'.join(self.previous_errors) + '"',
+ use_custom_system_message=False,
+ response_format_example=response_format_was_error_seen_before,
+ )['was_error_seen_before.json']
+ )['was_error_seen_before'].lower() == 'yes'
+
+ suggested_solution = None
+ if was_error_seen_before:
+ for _num_solution in range(1, len(suggested_solutions) + 1):
+ _suggested_solution = suggested_solutions[str(_num_solution)]
+ was_solution_tried_before = json.loads(
+ self.generate_and_persist_file(
+ section_title='Check if solution was tried before',
+ template=template_was_solution_tried_before,
+ file_name_s=['will_lead_to_different_actions.json'],
+ tried_solutions='- "' + f'"{os.linesep}- "'.join(self.previous_solutions) + '"',
+ suggested_solution=_suggested_solution,
+ use_custom_system_message=False,
+ response_format_example=response_format_was_solution_tried_before,
+ )['will_lead_to_different_actions.json']
+ )['will_lead_to_different_actions'].lower() == 'no'
+ if not was_solution_tried_before:
+ suggested_solution = _suggested_solution
+ break
+ else:
+ suggested_solution = suggested_solutions['1']
+
+ if suggested_solution is None:
+ suggested_solution = f"solve error: {summarized_error}"
+ else:
+ suggested_solution = suggested_solutions['1']
+
+ return suggested_solution
+
class MaxDebugTimeReachedException(BaseException):
pass
@@ -413,7 +531,7 @@ pytest
description=self.microservice_specification.task
)['strategies.json']
packages_list = [[pkg.strip().lower() for pkg in packages] for packages in json.loads(packages_json_string)]
- packages_list = [[self.replace_with_gpt_3_5_turbo_if_possible(pkg) for pkg in packages] for packages in
+ packages_list = [[self.replace_with_tool_if_possible(pkg) for pkg in packages] for packages in
packages_list]
packages_list = self.filter_packages_list(packages_list)
@@ -423,16 +541,16 @@ pytest
# '/private/var/folders/f5/whmffl4d7q79s29jpyb6719m0000gn/T/pytest-of-florianhonicke/pytest-128/test_generation_level_0_mock_i0'
# '/private/var/folders/f5/whmffl4d7q79s29jpyb6719m0000gn/T/pytest-of-florianhonicke/pytest-129/test_generation_level_0_mock_i0'
def generate(self):
- self.microservice_specification.task, self.microservice_specification.test = PM().refine_specification(self.microservice_specification.task)
os.makedirs(self.microservice_root_path)
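+        # the root dir must exist before the first GPT call so the conversation logger can write log.json into it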
+ self.microservice_specification.task, self.microservice_specification.test = PM().refine_specification(self.microservice_specification.task)
generated_name = self.generate_microservice_name(self.microservice_specification.task)
- microservice_name = f'{generated_name}{random.randint(0, 10_000_000)}'
+ self.microservice_name = f'{generated_name}{random.randint(0, 10_000_000)}'
packages_list = self.get_possible_packages()
for num_approach, packages in enumerate(packages_list):
try:
- self.generate_microservice(microservice_name, packages, num_approach)
- final_version_path = self.debug_microservice(microservice_name, num_approach, packages)
- self.generate_playground(microservice_name, final_version_path)
+ self.generate_microservice(packages, num_approach)
+ self.debug_microservice(num_approach, packages, self.self_healing)
+ self.generate_playground()
except self.MaxDebugTimeReachedException:
print('Could not debug the Microservice with the approach:', packages)
if num_approach == len(packages_list) - 1:
@@ -457,9 +575,11 @@ You can now run or deploy your microservice:
@staticmethod
- def replace_with_gpt_3_5_turbo_if_possible(pkg):
+ def replace_with_tool_if_possible(pkg):
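+        # swap well-known NLP/search packages for the built-in gpt_3_5_turbo / google_custom_search tools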
if pkg in LANGUAGE_PACKAGES:
return 'gpt_3_5_turbo'
+ if pkg in SEARCH_PACKAGES:
+ return 'google_custom_search'
return pkg
@staticmethod
diff --git a/dev_gpt/options/generate/pm/pm.py b/dev_gpt/options/generate/pm/pm.py
index 7f3dfa5..83a3d98 100644
--- a/dev_gpt/options/generate/pm/pm.py
+++ b/dev_gpt/options/generate/pm/pm.py
@@ -5,7 +5,8 @@ from dev_gpt.options.generate.chains.question_answering import is_question_true
from dev_gpt.options.generate.chains.translation import translation
from dev_gpt.options.generate.chains.user_confirmation_feedback_loop import user_feedback_loop
from dev_gpt.options.generate.chains.get_user_input_if_needed import get_user_input_if_needed
-from dev_gpt.options.generate.parser import identity_parser
+from dev_gpt.options.generate.parser import identity_parser, json_parser
+from dev_gpt.options.generate.pm.task_tree_schema import TaskTree
from dev_gpt.options.generate.prompt_factory import make_prompt_friendly
from dev_gpt.options.generate.ui import get_random_employee
@@ -35,9 +36,9 @@ Description of the microservice:
def refine(self, microservice_description):
microservice_description, test_description = self.refine_description(microservice_description)
- return microservice_description, test_description
- # sub_task_tree = self.construct_sub_task_tree(microservice_description)
+ # sub_task_tree = construct_sub_task_tree(microservice_description)
# return sub_task_tree
+ return microservice_description, test_description
def refine_description(self, microservice_description):
context = {'microservice_description': microservice_description}
@@ -60,8 +61,9 @@ Description of the microservice:
microservice_description += self.user_input_extension_if_needed(
context,
microservice_description,
- condition_question='Does the microservice send requests to an API?',
- question_gen='Generate a question that asks for the endpoint and an example of a request and response when interacting with the external API.',
+ condition_question='''\
Does the microservice send requests to an API besides the Google Custom Search API and gpt-3.5-turbo?'''
+ question_gen='Generate a question that asks for the endpoint of the external API and an example of a request and response when interacting with the external API.',
extension_name='Example of API usage',
post_transformation_fn=translation(from_format='api instruction', to_format='python code snippet raw without formatting')
)
@@ -127,44 +129,44 @@ Example:
# microservice_description=microservice_description
# )
#
-# def construct_sub_task_tree(self, microservice_description):
-# """
-# takes a microservice description and recursively constructs a tree of sub-tasks that need to be done to implement the microservice
-# """
-# #
-# # nlp_fns = self.get_nlp_fns(
-# # microservice_description
-# # )
-#
-# sub_task_tree_dict = ask_gpt(
-# construct_sub_task_tree_prompt, json_parser,
-# microservice_description=microservice_description,
-# # nlp_fns=nlp_fns
-# )
-# reflections = ask_gpt(
-# sub_task_tree_reflections_prompt, identity_parser,
-# microservice_description=microservice_description,
-# # nlp_fns=nlp_fns,
-# sub_task_tree=sub_task_tree_dict,
-# )
-# solutions = ask_gpt(
-# sub_task_tree_solutions_prompt, identity_parser,
-# # nlp_fns=nlp_fns,
-# microservice_description=microservice_description, sub_task_tree=sub_task_tree_dict,
-# reflections=reflections,
-# )
-# sub_task_tree_updated = ask_gpt(
-# sub_task_tree_update_prompt,
-# json_parser,
-# microservice_description=microservice_description,
-# # nlp_fns=nlp_fns,
-# sub_task_tree=sub_task_tree_dict, solutions=solutions
-# )
-# # for task_dict in self.iterate_over_sub_tasks(sub_task_tree_updated):
-# # task_dict.update(self.get_additional_task_info(task_dict['task']))
-#
-# sub_task_tree = TaskTree.parse_obj(sub_task_tree_updated)
-# return sub_task_tree
+def construct_sub_task_tree(microservice_description):
+    """
+    Takes a microservice description and recursively constructs a tree of sub-tasks needed to implement the microservice.
+ """
+ #
+ # nlp_fns = self.get_nlp_fns(
+ # microservice_description
+ # )
+
+ sub_task_tree_dict = ask_gpt(
+ construct_sub_task_tree_prompt, json_parser,
+ microservice_description=microservice_description,
+ # nlp_fns=nlp_fns
+ )
+ reflections = ask_gpt(
+ sub_task_tree_reflections_prompt, identity_parser,
+ microservice_description=microservice_description,
+ # nlp_fns=nlp_fns,
+ sub_task_tree=sub_task_tree_dict,
+ )
+ solutions = ask_gpt(
+ sub_task_tree_solutions_prompt, identity_parser,
+ # nlp_fns=nlp_fns,
+ microservice_description=microservice_description, sub_task_tree=sub_task_tree_dict,
+ reflections=reflections,
+ )
+ sub_task_tree_updated = ask_gpt(
+ sub_task_tree_update_prompt,
+ json_parser,
+ microservice_description=microservice_description,
+ # nlp_fns=nlp_fns,
+ sub_task_tree=sub_task_tree_dict, solutions=solutions
+ )
+ # for task_dict in self.iterate_over_sub_tasks(sub_task_tree_updated):
+ # task_dict.update(self.get_additional_task_info(task_dict['task']))
+
+ sub_task_tree = TaskTree.parse_obj(sub_task_tree_updated)
+ return sub_task_tree
# def get_additional_task_info(self, sub_task_description):
# additional_info_dict = self.get_additional_infos(
@@ -280,71 +282,71 @@ Example:
# Note: You must ignore facts that are unknown.
# Note: You must ignore facts that are unclear.'''
-# construct_sub_task_tree_prompt = client_description + '''
-# Recursively constructs a tree of functions that need to be implemented for the endpoint_function that retrieves a json string and returns a json string.
-# Example:
-# Input: "Input: list of integers, Output: Audio file of short story where each number is mentioned exactly once."
-# Output:
-# {{
-# "description": "Create an audio file containing a short story in which each integer from the provided list is seamlessly incorporated, ensuring that every integer is mentioned exactly once.",
-# "python_fn_signature": "def generate_integer_story_audio(numbers: List[int]) -> str:",
-# "sub_fns": [
-# {{
-# "description": "Generate sentence from integer.",
-# "python_fn_signature": "def generate_sentence_from_integer(number: int) -> int:",
-# "sub_fns": []
-# }},
-# {{
-# "description": "Convert the story into an audio file.",
-# "python_fn_signature": "def convert_story_to_audio(story: str) -> bytes:",
-# "sub_fns": []
-# }}
-# ]
-# }}
-#
-# Note: you must only output the json string - nothing else.
-# Note: you must pretty print the json string.'''
+construct_sub_task_tree_prompt = client_description + '''
+Recursively constructs a tree of functions that need to be implemented for the endpoint_function that retrieves a json string and returns a json string.
+Example:
+Input: "Input: list of integers, Output: Audio file of short story where each number is mentioned exactly once."
+Output:
+{{
+ "description": "Create an audio file containing a short story in which each integer from the provided list is seamlessly incorporated, ensuring that every integer is mentioned exactly once.",
+ "python_fn_signature": "def generate_integer_story_audio(numbers: List[int]) -> str:",
+ "sub_fns": [
+ {{
+ "description": "Generate sentence from integer.",
+ "python_fn_signature": "def generate_sentence_from_integer(number: int) -> int:",
+ "sub_fns": []
+ }},
+ {{
+ "description": "Convert the story into an audio file.",
+ "python_fn_signature": "def convert_story_to_audio(story: str) -> bytes:",
+ "sub_fns": []
+ }}
+ ]
+}}
-# sub_task_tree_reflections_prompt = client_description + '''
-# Sub task tree:
-# ```
-# {sub_task_tree}
-# ```
-# Write down 3 arguments why the sub task tree might not perfectly represents the information mentioned in the microservice description. (5 words per argument)'''
-#
-# sub_task_tree_solutions_prompt = client_description + '''
-# Sub task tree:
-# ```
-# {sub_task_tree}
-# ```
-# Reflections:
-# ```
-# {reflections}
-# ```
-# For each constructive criticism, write a solution (5 words) that address the criticism.'''
-#
-# sub_task_tree_update_prompt = client_description + '''
-# Sub task tree:
-# ```
-# {sub_task_tree}
-# ```
-# Solutions:
-# ```
-# {solutions}
-# ```
-# Update the sub task tree by applying the solutions. (pretty print the json string)'''
-#
-# ask_questions_prompt = client_description + '''
-# Request json schema:
-# ```
-# {request_schema}
-# ```
-# Response json schema:
-# ```
-# {response_schema}
-# ```
-# Ask the user up to 5 unique detailed questions (5 words) about the microservice description that are not yet answered.
-# '''
+Note: you must only output the json string - nothing else.
+Note: you must pretty print the json string.'''
+
+sub_task_tree_reflections_prompt = client_description + '''
+Sub task tree:
+```
+{sub_task_tree}
+```
+Write down 3 arguments why the sub task tree might not perfectly represent the information mentioned in the microservice description. (5 words per argument)'''
+
+sub_task_tree_solutions_prompt = client_description + '''
+Sub task tree:
+```
+{sub_task_tree}
+```
+Reflections:
+```
+{reflections}
+```
+For each constructive criticism, write a solution (5 words) that addresses the criticism.'''
+
+sub_task_tree_update_prompt = client_description + '''
+Sub task tree:
+```
+{sub_task_tree}
+```
+Solutions:
+```
+{solutions}
+```
+Update the sub task tree by applying the solutions. (pretty print the json string)'''
+
+ask_questions_prompt = client_description + '''
+Request json schema:
+```
+{request_schema}
+```
+Response json schema:
+```
+{response_schema}
+```
+Ask the user up to 5 unique detailed questions (5 words) about the microservice description that are not yet answered.
+'''
# answer_questions_prompt = client_description + '''
# Request json schema:
diff --git a/dev_gpt/options/generate/pm/task_tree_schema.py b/dev_gpt/options/generate/pm/task_tree_schema.py
index 41035fc..39f3518 100644
--- a/dev_gpt/options/generate/pm/task_tree_schema.py
+++ b/dev_gpt/options/generate/pm/task_tree_schema.py
@@ -1,22 +1,22 @@
-# from typing import Dict, List, Union, Optional
-# from pydantic import BaseModel, Field
-#
-# class JSONSchema(BaseModel):
-# type: str
-# format: Union[str, None] = None
-# items: Union['JSONSchema', None] = None
-# properties: Dict[str, 'JSONSchema'] = Field(default_factory=dict)
-# additionalProperties: Union[bool, 'JSONSchema'] = True
-# required: List[str] = Field(default_factory=list)
-#
-# class Config:
-# arbitrary_types_allowed = True
-#
-# class TaskTree(BaseModel):
-# description: Optional[str]
-# python_fn_signature: str
-# sub_fns: List['TaskTree']
-#
-# JSONSchema.update_forward_refs()
-# TaskTree.update_forward_refs()
+from typing import Dict, List, Union, Optional
+from pydantic import BaseModel, Field
+
+class JSONSchema(BaseModel):
+ type: str
+ format: Union[str, None] = None
+ items: Union['JSONSchema', None] = None
+ properties: Dict[str, 'JSONSchema'] = Field(default_factory=dict)
+ additionalProperties: Union[bool, 'JSONSchema'] = True
+ required: List[str] = Field(default_factory=list)
+
+ class Config:
+ arbitrary_types_allowed = True
+
+class TaskTree(BaseModel):
+ description: Optional[str]
+ python_fn_signature: str
+ sub_fns: List['TaskTree']
+
+JSONSchema.update_forward_refs()
+TaskTree.update_forward_refs()
#
diff --git a/dev_gpt/options/generate/static_files/gateway/app_template.py b/dev_gpt/options/generate/static_files/gateway/app_template.py
new file mode 100644
index 0000000..a9ec3cd
--- /dev/null
+++ b/dev_gpt/options/generate/static_files/gateway/app_template.py
@@ -0,0 +1,53 @@
+import json
+import os
+
+import streamlit as st
+from jina import Client, Document, DocumentArray
+import io
+
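+# Streamlit playground template; the generator fills in the placeholder titles, descriptions, and input fields below.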
+st.set_page_config(
+ page_title="",
+ page_icon="",
+ layout="centered",
+ initial_sidebar_state="auto",
+)
+
+st.title(" ")
+st.markdown(
+ "<10 word description here>"
+ "To generate and deploy your own microservice, click [here](https://github.com/jina-ai/dev-gpt)."
+)
+st.subheader(" ") # only if input parameters are needed
+with st.form(key="input_form"):
+ #
+ input_json_dict = {} #
+
+ input_json_dict_string = json.dumps(input_json_dict)
+ submitted = st.form_submit_button("")
+
+# Process input and call microservice
+if submitted:
+ with st.spinner("..."):
+ client = Client(host="http://localhost:8080")
+ d = Document(text=input_json_dict_string)
+ response = client.post("/", inputs=DocumentArray([d]))
+
+ output_data = json.loads(response[0].text)
+ #
+
+# Display curl command
+deployment_id = os.environ.get("K8S_NAMESPACE_NAME", "")
+api_endpoint = (
+ f"https://dev-gpt-{deployment_id.split('-')[1]}.wolf.jina.ai/post"
+ if deployment_id
+ else "http://localhost:8080/post"
+)
+
+with st.expander("See curl command"):
+ st.markdown("You can use the following curl command to send a request to the microservice from the command line:")
+ escaped_input_json_dict_string = input_json_dict_string.replace('"', '\\"')
+
+ st.code(
+ f'curl -X "POST" "{api_endpoint}" -H "accept: application/json" -H "Content-Type: application/json" -d \'{{"data": [{{"text": "{escaped_input_json_dict_string}"}}]}}\'',
+ language="bash",
+ )
diff --git a/dev_gpt/options/generate/static_files/gateway/custom_gateway.py b/dev_gpt/options/generate/static_files/gateway/custom_gateway.py
index 766a6c4..c84e9f9 100644
--- a/dev_gpt/options/generate/static_files/gateway/custom_gateway.py
+++ b/dev_gpt/options/generate/static_files/gateway/custom_gateway.py
@@ -79,6 +79,7 @@ class CustomGateway(CompositeGateway):
f'Please, let http port ({http_port}) be 8080 for nginx to work'
)
kwargs['runtime_args']['port'][http_idx] = 8082
+ kwargs['cors'] = True
super().__init__(**kwargs)
# remove potential clashing arguments from kwargs
diff --git a/dev_gpt/options/generate/static_files/gateway/requirements.txt b/dev_gpt/options/generate/static_files/gateway/requirements.txt
index 91d328c..a5ab956 100644
--- a/dev_gpt/options/generate/static_files/gateway/requirements.txt
+++ b/dev_gpt/options/generate/static_files/gateway/requirements.txt
@@ -1,3 +1,4 @@
streamlit==1.16.0
+altair==4.2.2
extra-streamlit-components==0.1.55
jina==3.15.1.dev14
\ No newline at end of file
diff --git a/dev_gpt/options/generate/static_files/microservice/google_custom_search.py b/dev_gpt/options/generate/static_files/microservice/google_custom_search.py
new file mode 100644
index 0000000..f112129
--- /dev/null
+++ b/dev_gpt/options/generate/static_files/microservice/google_custom_search.py
@@ -0,0 +1,28 @@
+import os
+
+import requests
+
+
+def google_search(search_term, search_type, top_n):
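+    """Query the Google Custom Search JSON API; 'searchType' is only sent for image search and the API returns at most 10 results per request."""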
+    google_api_key = os.environ['GOOGLE_API_KEY']
+    google_cse_id = os.environ['GOOGLE_CSE_ID']
+ url = "https://www.googleapis.com/customsearch/v1"
+ params = {
+ 'q': search_term,
+ 'key': google_api_key,
+ 'cx': google_cse_id,
+ **({'searchType': search_type} if search_type == 'image' else {}),
+ 'num': top_n
+ }
+ response = requests.get(url, params=params)
+ response.raise_for_status()
+ return response.json()
+
+def search_images(search_term, top_n):
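+    # Return the direct links of the top_n image results.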
+ response = google_search(search_term, search_type="image", top_n=top_n)
+ return [item["link"] for item in response["items"]]
+
+def search_web(search_term, top_n):
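+    # Return the text snippets of the top_n web results.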
+ response = google_search(search_term, search_type="web", top_n=top_n)
+ return [item["snippet"] for item in response["items"]]
diff --git a/dev_gpt/options/generate/static_files/microservice/gpt_3_5_turbo.py b/dev_gpt/options/generate/static_files/microservice/gpt_3_5_turbo.py
new file mode 100644
index 0000000..8b618ef
--- /dev/null
+++ b/dev_gpt/options/generate/static_files/microservice/gpt_3_5_turbo.py
@@ -0,0 +1,24 @@
+import os
+import openai
+
+
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+
+class GPT_3_5_Turbo:
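+    # Minimal wrapper around OpenAI's ChatCompletion endpoint: system_string pins
+    # the assistant's behaviour and each call sends a single user prompt.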
+ def __init__(self, system_string: str = ''):
+ self.system = system_string
+
+ def __call__(self, prompt_string: str) -> str:
+ response = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ messages=[{
+ "role": 'system',
+ "content": self.system
+ }, {
+ "role": 'user',
+ "content": prompt_string
+ }]
+ )
+ return response.choices[0]['message']['content']
+
diff --git a/dev_gpt/options/generate/templates_user.py b/dev_gpt/options/generate/templates_user.py
index ed31362..47442e8 100644
--- a/dev_gpt/options/generate/templates_user.py
+++ b/dev_gpt/options/generate/templates_user.py
@@ -16,7 +16,8 @@ The Dockerfile must not attach a virtual display when running test_microservice.
not_allowed_function_string = '''The implemented function and the test must not use the GPU.
The implemented function and the test must not access a database.
The implemented function and the test must not access a display.
-The implemented function and the test must not access external apis except unless it is explicitly mentioned in the description or test case (e.g. by mentioning the api that should be used or by providing a URL to access the data).
+The implemented function and the test must not access external apis unless it is explicitly mentioned.
+The implemented function and the test must not be based on a large collection of hard-coded strings.
The implemented function and the test must not open files from the local file system unless it was created by the implemented function itself.
The implemented function and the test must not use a pre-trained model unless it is explicitly mentioned in the description.
The implemented function and the test must not train a model.
@@ -37,10 +38,15 @@ The executor name must fulfill the following criteria:
- only consists of lower and upper case characters
- end with Executor.
-The output is a the raw string wrapped into ``` and starting with **name.txt** like this:
+Your response must exactly match the following block code format (double asterisks for the file name and triple backticks for the file block):
**name.txt**
```
-PDFParserExecutor
+
+```
+Example for: "Get a png as input and return a vectorized version as svg.":
+**name.txt**
+```
+PngToSvgExecutor
```'''
)
@@ -62,7 +68,7 @@ e) the implementation of the core problem using the package would obey the follo
When answering, just write "yes" or "no".
4. For each approach, list the required python package combinations as described in the following.
-You must output the package combinations as json wrapped into tripple backticks ``` and name it **strategies.json**. \
+You must output the package combinations as json wrapped into triple backticks ``` and name it **strategies.json**. \
Note that you can also leave a list empty to indicate that one of the strategies does not require any package and can be done in plain python.
Write the output using double asterisks and triple backticks like this:
**strategies.json**
@@ -78,56 +84,79 @@ Write the output using double asterisks and triple backticks like this:
template_code_wrapping_string = '''The code will go into {file_name_purpose}.
-Note that you must obey the double asterisk and tripple backtick syntax from like this:
+Note that you must obey the double asterisk and triple backtick syntax like this:
**{file_name}**
```{tag_name}
...code...
```
-You must provide the complete file with the exact same syntax to wrap the code.'''
+You must provide the complete {file_name} wrapped with the exact syntax shown above.'''
-gpt_35_turbo_usage_string = """If need to use gpt_3_5_turbo, then this is an example on how to use it:
+gpt_35_turbo_usage_string = """If you need to use gpt_3_5_turbo, then use it as shown in the following example:
```
-from .apis import GPT_3_5_Turbo
+from .gpt_3_5_turbo import GPT_3_5_Turbo
gpt_3_5_turbo = GPT_3_5_Turbo(
- system=\'\'\'
+ system_string=\'\'\'
You are a tv-reporter who is specialized in C-list celebrities.
-When you get asked something like 'Who was having a date with ?', then you answer with a json like '{{"dates": ["", ""]}}'.
+When you get asked something like 'Who was having a date with ?', then you answer with a string like ", were having a date with "'.
You must not answer something else - only the json.
\'\'\')
-generated_string = gpt(prompt) # fill-in the prompt (str); the output is a string
+generated_string = gpt_3_5_turbo(prompt_string="example user prompt") # prompt_string is the only parameter
```
"""
+google_custom_search_usage_string = """If you need to use google_custom_search, then use it as shown in the following example:
+a) when searching for text:
+```
+from .google_custom_search import search_web
-template_generate_function = PromptTemplate.from_template(
- general_guidelines_string + '''
+# input: search term (str), top_n (int)
+# output: list of strings
+string_list = search_web('', top_n=10)
+```
+b) when searching for images:
+```
+from .google_custom_search import search_images
+
+# input: search term (str), top_n (int)
+# output: list of image urls
+image_url_list = search_images('', top_n=10)
+```
+"""
+
+linebreak = '\n'
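+# Build the function-generation prompt; the gpt_3_5_turbo and google_custom_search
+# usage instructions are injected only for the tools that are actually in use.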
+def template_generate_function_constructor(is_using_gpt_3_5_turbo, is_using_google_custom_search):
+ return PromptTemplate.from_template(
+ general_guidelines_string + f'''
Write a python function which receives as \
-input json string (that can be parsed with the python function json.loads) and \
-outputs a json string (that can be parsed with the python function json.loads). \
-The function is called 'func'.
-The function must fulfill the following description: '{microservice_description}'.
-It will be tested with the following scenario: '{test_description}'.
-For the implementation use the following package(s): '{packages}'.
+input json dictionary string (that can be parsed with the python function json.loads) and \
+outputs a json dictionary string (that can be parsed with the python function json.loads). \
+The function is called 'func' and has the following signature:
+def func(input_json_dict_string: str) -> str:
+The function must fulfill the following description: '{{microservice_description}}'.
+It will be tested with the following scenario: '{{test_description}}'.
+For the implementation use the following package(s): '{{packages}}'.
The code must start with the following imports:
-```
-from .apis import GPT_3_5_Turbo
+```{linebreak +'from .gpt_3_5_turbo import GPT_3_5_Turbo' if is_using_gpt_3_5_turbo else ""}{linebreak + 'from .google_custom_search import search_web, search_images' if is_using_google_custom_search else ""}
import json
+import requests
```
Obey the following rules:
-''' + not_allowed_function_string + '''
+{not_allowed_function_string}
Your approach:
1. Identify the core challenge when implementing the function.
2. Think about solutions for these challenges.
3. Decide for one of the solutions.
4. Write the code for the function. Don't write code for the test.
-''' + gpt_35_turbo_usage_string + '\n' + template_code_wrapping_string
-)
+{gpt_35_turbo_usage_string if is_using_gpt_3_5_turbo else ''}
+{google_custom_search_usage_string if is_using_google_custom_search else ''}
+{template_code_wrapping_string}'''
+ )
template_generate_test = PromptTemplate.from_template(
@@ -142,6 +171,7 @@ The test must start with the following imports:
```
from .microservice import func
import json
+import requests
```
''' + not_allowed_function_string + '''
The test must not open local files.
@@ -206,10 +236,11 @@ The output would be:
template_summarize_error = PromptTemplate.from_template(
- '''Here is an error message I encountered during the docker build process:
+ '''Your task is to condense an error encountered during the docker build process. The error message is as follows:
"{error}"
Your task is to summarize the error message as compact and informative as possible \
while maintaining all information necessary to debug the core issue (100 words).
+The summary should also provide additional context regarding the specific file and line number where the error occurred. \
Note that you must not suggest a solution to the error.
Warnings are not worth mentioning.'''
)
@@ -234,7 +265,7 @@ Is this error happening because a PACKAGE_MANAGER package is missing or failed t
```json
{{"dependency_installation_failure": ""}}
```
-Note that you must obey the double asterisk and tripple backtick syntax from above.
+Note that you must obey the double asterisk and triple backtick syntax from above.
'''
)
@@ -294,14 +325,23 @@ The output is:
```json
{{"packages": [libgl1-mesa-glx]}}
```
-Note that you must not output the content of any other files like the Dockerfile or requirements.txt.
-Only output the apt-get-packages.json file.
-Note that the first line you output must be: **apt-get-packages.json**
+Only output the content of the apt-get-packages.json file. Ensure the response can be parsed by Python json.loads.
+Note: you must not output the content of any other file. In particular, don't output the Dockerfile or requirements.txt.
+Note: the first line you output must be: **apt-get-packages.json**
'''
)
-template_solve_code_issue = PromptTemplate.from_template(
+response_format_suggest_solutions = '''**solutions.json**
+```json
+{{
+ "1": "",
+ "2": "<2nd best solution>"
+}}
+```'''
+
+
+template_suggest_solutions_code_issue = PromptTemplate.from_template(
'''General rules:
''' + not_allowed_function_string + '''
@@ -317,14 +357,73 @@ Here are all the files I use:
Here is the summary of the error that occurred:
{summarized_error}
-To solve this error, you should:
-1. Suggest 3 to 5 possible solutions on how to solve it. You have no access to the documentation of the package.
-2. Decide for the best solution and explain it in detail.
-3. Write down the files that need to be changed, but not files that don't need to be changed.
-Note that any changes needed to make the test pass must be written under the constraint that ''' + IMPLEMENTATION_FILE_NAME + ''' will be used in a different file as well.
+You should suggest 3 to 5 possible approaches for solving it.
Obey the following rules:
+Do not implement the solution.
+You have no access to the documentation of the package.
+You must not change the Dockerfile.
+Note that any changes needed to make the test pass must be written under the constraint that ''' + IMPLEMENTATION_FILE_NAME + ''' will be used in a different file as well.
''' + f'{not_allowed_function_string}\n{not_allowed_docker_string}\n{gpt_35_turbo_usage_string}' + '''
+
+After thinking about the possible solutions, output them as JSON ranked from best to worst.
+You must use the following format:
+''' + response_format_suggest_solutions + '''
+Ensure the response starts with **solutions.json** and can be parsed by Python json.loads'''
+)
+
+
+response_format_was_error_seen_before = '''**was_error_seen_before.json**
+```json
+{{"was_error_seen_before": ""}}
+```'''
+
+
+template_was_error_seen_before = PromptTemplate.from_template(
+ '''Previously encountered error messages:
+{previous_errors}
+
+Now encountered error message: "{summarized_error}"
+Was this error message encountered before?
+
+Write down your final answer as json in the following format:
+''' + response_format_was_error_seen_before + '''
+Note that you must obey the double asterisk and triple backtick syntax from above. Ensure the response can be parsed by Python json.loads
+'''
+)
+
+
+response_format_was_solution_tried_before = '''**will_lead_to_different_actions.json**
+```json
+{{"will_lead_to_different_actions": ""}}
+```'''
+
+
+template_was_solution_tried_before = PromptTemplate.from_template(
+ '''Previously tried solutions:
+{tried_solutions}
+
+Suggested solution: "{suggested_solution}"
+
+Will the suggested solution lead to different actions than the previously tried solutions?
+
+Write down your final answer as json in the following format:
+''' + response_format_was_solution_tried_before + '''
+Note that you must obey the double asterisk and triple backtick syntax from above. Ensure the response can be parsed by Python json.loads'''
+)
+
+
+template_implement_solution_code_issue = PromptTemplate.from_template(
+ '''Here is the description of the task the function must solve:
+{task_description}
+
+Here is the test scenario the function must pass:
+{test_description}
+Here are all the files I use:
+{all_files_string}
+
+Implement the suggested solution: {suggested_solution}
+
Output all the files that need change. You must not change the Dockerfile.
Don't output files that don't need change. If you output a file, then write the complete file.
Use the exact following syntax to wrap the code:
@@ -335,13 +434,12 @@ Use the exact following syntax to wrap the code:
```
Example:
-
-**microservice.py**
+**implementation.py**
```python
import json
-def func(json_input: str) -> str:
- return json_input['img_base64']
+def func(input_json_dict_string: str) -> str:
+    input_json_dict = json.loads(input_json_dict_string)
+    return json.dumps({'output_param1': input_json_dict['img_base64']})
```'''
)
@@ -351,50 +449,30 @@ template_generate_playground = PromptTemplate.from_template(
{code_files_wrapped}
-Create a playground for the executor {microservice_name} using streamlit.
-The playground must look like it was made by a professional designer.
-All the ui elements are well thought out to make them visually appealing and easy to use.
-Don't mention the word Playground in the title.
-The playground contains many emojis that fit the theme of the playground and has an emoji as favicon.
-The playground encourages the user to deploy their own microservice by clicking on this link: https://github.com/jina-ai/dev-gpt
-The playground uses the following code to send a request to the microservice:
+1. Write down the json request model required by microservice.py.
+2. Generate a playground for the microservice {microservice_name} using the following streamlit template by replacing all the placeholders (<...>) with the correct values:
+**app_template.py**
+```python
+{playground_template}
```
-from jina import Client, Document, DocumentArray
-client = Client(host='http://localhost:8080')
-d = Document(text=json.dumps(INPUT_DICTIONARY)) # fill-in dictionary which takes input
-response = client.post('/', inputs=DocumentArray([d])) # always use '/'
-print(response[0].text) # can also be blob in case of image/audio..., this should be visualized in the streamlit app
-```
-Note that the response will always be in response[0].text
-The playground displays a code block containing the microservice specific curl code that can be used to send the request to the microservice.
-While the exact payload in the curl might change, the host and deployment ID always stay the same. Example:
-```
-deployment_id = os.environ.get("K8S_NAMESPACE_NAME", "")
-host = f'https://dev-gpt-{{deployment_id.split("-")[1]}}.wolf.jina.ai/post' if deployment_id else "http://localhost:8080/post"
-with st.expander("See curl command"):
- st.code(
- f'curl -X \\'POST\\' \\'host\\' -H \\'accept: application/json\\' -H \\'Content-Type: application/json\\' -d \\'{{{{"data": [{{{{"text": "hello, world!"}}}}]}}}}\\'',
- language='bash'
- )
-```
-You must provide the complete app.py file using the following syntax to wrap the code:
+Note: Don't mention the word Playground in the title.
+Most importantly: You must generate the complete app.py file using the following syntax to wrap the code:
**app.py**
```python
...
-```
-The playground (app.py) must always use the host on http://localhost:8080 and must not let the user configure the host on the UI.
-The playground (app.py) must not import the executor.
-'''
+```'''
)
template_chain_of_thought = PromptTemplate.from_template(
- '''First, write down an extensive list of obvious and non-obvious observations about {file_name_purpose} that could need an adjustment. Explain why.
-Think if all the changes are required and finally decide for the changes you want to make, but you are not allowed disregard the instructions in the previous message.
-Be very hesitant to change the code. Only make a change if you are sure that it is necessary.
-
-Output only {file_name_purpose}
-Write the whole content of {file_name_purpose} - even if you decided to change only a small thing or even nothing.
+ '''\
+1. Write down an extensive list (5 words per item) of obvious and non-obvious observations about {file_name_purpose} that could need an adjustment.
+2. Explain why. (5 words per item)
+3. Think about whether all the changes are required.
+4. Decide on the changes you want to make, but you are not allowed to disregard the instructions in the previous message.
+5. Write the whole content of {file_name_purpose} - even if you decided to change only a small thing or even nothing.
+Note: Be very hesitant to change the code. Only make a change if you are sure that it is necessary.
+Note: Output only {file_name_purpose}
''' + '\n' + template_code_wrapping_string + '''
Remember:
@@ -427,7 +505,7 @@ Or write the detailed microservice description all mentioned code samples, docum
}}
```
Note that your response must be either prompt.json or final.json. You must not write both.
-Note that you must obey the double asterisk and tripple backtick syntax from above.
+Note that you must obey the double asterisk and triple backtick syntax from above.
Note that the last sequence of characters in your response must be ``` (triple backtick).
Note that prompt.json must not only contain one question.
Note that if urls, secrets, database names, etc. are mentioned, they must be part of the summary.
@@ -471,7 +549,7 @@ Example for the case where the example is already mentioned in the refined descr
}}
```
Note that your response must be either prompt.json or final.json. You must not write both.
-Note that you must obey the double asterisk and tripple backtick syntax from above.
+Note that you must obey the double asterisk and triple backtick syntax from above.
Note that the last sequence of characters in your response must be ``` (triple backtick).
Note that your response must start with the character sequence ** (double asterisk).
Note that prompt.json must only contain one question.
diff --git a/dev_gpt/options/generate/tools/__init__.py b/dev_gpt/options/generate/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/dev_gpt/options/generate/tools/tools.py b/dev_gpt/options/generate/tools/tools.py
new file mode 100644
index 0000000..02eebda
--- /dev/null
+++ b/dev_gpt/options/generate/tools/tools.py
@@ -0,0 +1,9 @@
+import os
+
+
+def get_available_tools():
+ tools = ['gpt-3.5-turbo (for any kind of text processing like summarization, paraphrasing, etc.)']
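+    # Google Custom Search is only offered when both credentials are present in the environment.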
+ if os.environ.get('GOOGLE_API_KEY') and os.environ.get('GOOGLE_CSE_ID'):
+ tools.append('Google Custom Search API')
+ chars = 'abcdefghijklmnopqrstuvwxyz'
+ return '\n'.join([f'{char}) {tool}' for tool, char in zip(tools, chars)])
\ No newline at end of file
diff --git a/examples/rainbow_tweet/README.md b/examples/rainbow_tweet/README.md
new file mode 100644
index 0000000..914d3f5
--- /dev/null
+++ b/examples/rainbow_tweet/README.md
@@ -0,0 +1,2 @@
+Plugin that lets you convert a negative tweet into a positive one.
+
\ No newline at end of file
diff --git a/examples/rainbow_tweet/chrome_extension/button-icon.svg b/examples/rainbow_tweet/chrome_extension/button-icon.svg
new file mode 100644
index 0000000..bec00b5
--- /dev/null
+++ b/examples/rainbow_tweet/chrome_extension/button-icon.svg
@@ -0,0 +1,79 @@
+
+
+
\ No newline at end of file
diff --git a/examples/rainbow_tweet/chrome_extension/content.js b/examples/rainbow_tweet/chrome_extension/content.js
new file mode 100644
index 0000000..1444931
--- /dev/null
+++ b/examples/rainbow_tweet/chrome_extension/content.js
@@ -0,0 +1,83 @@
+console.log('Twitter Rewrite: Content script loaded');
+let openai_api_key = '';
+
+// Get OPENAI_API_KEY from chrome storage
+chrome.storage.sync.get({
+ openai_api_key: ''
+}, function(items) {
+ openai_api_key = items.openai_api_key;
+});
+
+let observer = new MutationObserver((mutations) => {
+ console.log('Twitter Rewrite: DOM mutation detected');
+ // For each mutation
+ mutations.forEach((mutation) => {
+ // If nodes were added
+ if (mutation.addedNodes) {
+ mutation.addedNodes.forEach((node) => {
+ // If the added node (or its descendants) contains a tweet
+ let tweets = node.querySelectorAll('[data-testid="tweet"]');
+ tweets.forEach((tweet) => {
+ // If the tweet doesn't already have a modify button
+ if (!tweet.querySelector('.modify-button')) {
+ // Create new button
+ let button = document.createElement('button');
+ if (openai_api_key === '') {
+ button.innerText = 'Set OPENAI_API_KEY by clicking the Rainbow-Tweet icon and reload the page';
+ button.disabled = true;
+ } else {
+ button.innerText = '🦄';
+ button.disabled = false;
+ }
+ button.className = 'modify-button';
+
+ // Add event listener for button click
+ button.addEventListener('click', function() {
+ let thisButton = this;
+ // Send tweet to API
+ let originalTweet = tweet.querySelector('[data-testid="tweetText"]').innerText;
+ this.disabled = true;
+ this.innerText = 'Loading...';
+ fetch('https://gptdeploy-61694dd6a3.wolf.jina.ai/post', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json',
+ 'accept': 'application/json'
+ },
+ body: JSON.stringify({
+ "data": [{"text": JSON.stringify({
+ "tweet": originalTweet,
+ "OPENAI_API_KEY": openai_api_key
+ }) }]
+ })
+ })
+ .then(response => response.json())
+ .then(data => {
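+        // data.data[0].text is itself a JSON string produced by the microservice
+        // and contains the 'positive_tweet' field.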
+ let modifiedTweet = JSON.parse(data.data[0].text).positive_tweet;
+ let rainbowTweet = Array.from(modifiedTweet).map((char, i) =>
+            `<span class="rainbow-text" style="--i: ${i}">${char}</span>`
+ ).join('');
+
+ // Create a new element node to contain the HTML
+ let newTweet = document.createElement('span');
+ newTweet.innerHTML = rainbowTweet;
+ // Replace the old text node with the new element node
+ tweet.querySelector('[data-testid="tweetText"]').replaceWith(newTweet);
+ // Remove the button
+ thisButton.remove();
+ });
+ });
+
+ // Find the actions container and inject the button into it
+ let actionGroups = tweet.querySelectorAll('div[role="group"]');
+ let actionsContainer = actionGroups[actionGroups.length - 1];
+ actionsContainer.appendChild(button);
+ }
+ });
+ });
+ }
+ });
+});
+
+// Start observing the document with the configured parameters
+observer.observe(document.body, { childList: true, subtree: true });
diff --git a/examples/rainbow_tweet/chrome_extension/jina.png b/examples/rainbow_tweet/chrome_extension/jina.png
new file mode 100644
index 0000000..6fe6d80
Binary files /dev/null and b/examples/rainbow_tweet/chrome_extension/jina.png differ
diff --git a/examples/rainbow_tweet/chrome_extension/logo.png b/examples/rainbow_tweet/chrome_extension/logo.png
new file mode 100644
index 0000000..803fbea
Binary files /dev/null and b/examples/rainbow_tweet/chrome_extension/logo.png differ
diff --git a/examples/rainbow_tweet/chrome_extension/manifest.json b/examples/rainbow_tweet/chrome_extension/manifest.json
new file mode 100644
index 0000000..2b9be91
--- /dev/null
+++ b/examples/rainbow_tweet/chrome_extension/manifest.json
@@ -0,0 +1,27 @@
+{
+ "manifest_version": 3,
+ "name": "Rainbow-Tweet",
+ "description": "The Rainbow-Tweet plugin allows the user to convert any tweet into positive language by clicking a button on the tweet.",
+ "version": "0.0.1.0",
+ "icons": {
+ "128": "logo.png"
+ },
+ "action": {
+ "default_icon": {
+ "128": "logo.png"
+ },
+ "default_title": "Configure API Key",
+ "default_popup": "popup.html"
+ },
+ "permissions": [
+ "storage"
+ ],
+ "content_scripts": [
+ {
+ "matches": ["https://twitter.com/*"],
+ "js": ["content.js"],
+ "css": ["styles.css"],
+ "run_at": "document_end"
+ }
+ ]
+}
diff --git a/examples/rainbow_tweet/chrome_extension/popup.css b/examples/rainbow_tweet/chrome_extension/popup.css
new file mode 100644
index 0000000..00e57df
--- /dev/null
+++ b/examples/rainbow_tweet/chrome_extension/popup.css
@@ -0,0 +1,37 @@
+body {
+ font-family: Arial, sans-serif;
+}
+
+.container {
+ width: 300px;
+ padding: 20px;
+}
+
+h1 {
+ color: #444;
+}
+
+.btn {
+ color: white;
+ background-color: #1da1f2;
+ border: none;
+ padding: 10px 20px;
+ margin-top: 10px;
+ cursor: pointer;
+}
+.footer {
+ margin-top: 20px;
+ text-align: center;
+}
+.btn:hover {
+ background-color: #0c84d2;
+}
+
+.form-group {
+ margin-bottom: 15px;
+}
+
+.form-text {
+ font-size: 0.875em;
+ color: #6c757d;
+}
diff --git a/examples/rainbow_tweet/chrome_extension/popup.html b/examples/rainbow_tweet/chrome_extension/popup.html
new file mode 100644
index 0000000..f3259fe
--- /dev/null
+++ b/examples/rainbow_tweet/chrome_extension/popup.html
@@ -0,0 +1,31 @@
+<!DOCTYPE html>
+<html>
+<head>
+  <title>Twitter Rewrite: Extension Options</title>
+  <link rel="stylesheet" href="popup.css">
+</head>
+<body>
+  <div class="container">
+    <h1>Rainbow-Tweet: Extension Options</h1>
+    <form id="optionForm">
+      <div class="form-group">
+        <label for="openai_api_key">OPENAI_API_KEY</label>
+        <input type="text" id="openai_api_key">
+        <p class="form-text">Stored locally via chrome.storage and only sent along with your requests.</p>
+      </div>
+      <button type="submit" class="btn">Save</button>
+    </form>
+    <div id="status" class="footer"></div>
+  </div>
+  <script src="popup.js"></script>
+</body>
+</html>
diff --git a/examples/rainbow_tweet/chrome_extension/popup.js b/examples/rainbow_tweet/chrome_extension/popup.js
new file mode 100644
index 0000000..c2f6b68
--- /dev/null
+++ b/examples/rainbow_tweet/chrome_extension/popup.js
@@ -0,0 +1,35 @@
+// Saving options to chrome.storage
+function save_options() {
+ let openai_api_key = document.getElementById('openai_api_key').value;
+ chrome.storage.sync.set({
+ openai_api_key: openai_api_key
+ }, function() {
+ // Update status to let user know options were saved.
+ let status = document.getElementById('status');
+ status.textContent = 'Options saved.';
+ setTimeout(function() {
+ status.textContent = '';
+ }, 750);
+ });
+}
+
+// Restores options from chrome.storage
+function restore_options() {
+ chrome.storage.sync.get({
+ openai_api_key: ''
+ }, function(items) {
+ document.getElementById('openai_api_key').value = items.openai_api_key;
+ });
+}
+
+document.addEventListener('DOMContentLoaded', restore_options);
+document.getElementById('optionForm').addEventListener('submit', function(event) {
+ event.preventDefault();
+ save_options();
+});
+
+
+
+
+
+
diff --git a/examples/rainbow_tweet/chrome_extension/styles.css b/examples/rainbow_tweet/chrome_extension/styles.css
new file mode 100644
index 0000000..bf6f9c9
--- /dev/null
+++ b/examples/rainbow_tweet/chrome_extension/styles.css
@@ -0,0 +1,78 @@
+.modify-button {
+ /* common styles */
+ border: none;
+ padding: 5px 10px;
+ text-align: center;
+ text-decoration: none;
+ display: inline-block;
+ font-size: 16px;
+ margin: 4px 2px;
+ cursor: pointer;
+ border-radius: 3px;
+ background: transparent; /* Make the button transparent */
+}
+
+/* Dark mode */
+@media (prefers-color-scheme: dark) {
+ .modify-button {
+ color: white; /* Light text for dark mode */
+ }
+}
+
+/* Light mode */
+@media (prefers-color-scheme: light) {
+ .modify-button {
+ color: #000; /* Dark text for light mode */
+ }
+}
+
+
+/* Light mode colors (darker) */
+@keyframes rainbow-light {
+ 0% { color: hsl(0, 100%, 30%); }
+ 14% { color: hsl(60, 100%, 30%); }
+ 28% { color: hsl(120, 100%, 30%); }
+ 42% { color: hsl(180, 100%, 30%); }
+ 57% { color: hsl(240, 100%, 30%); }
+ 71% { color: hsl(300, 100%, 30%); }
+ 85% { color: hsl(360, 100%, 30%); }
+ 100% { color: hsl(0, 100%, 30%); }
+}
+
+/* Dark mode colors (brighter) */
+@keyframes rainbow-dark {
+ 0% { color: hsl(0, 100%, 70%); }
+ 14% { color: hsl(60, 100%, 70%); }
+ 28% { color: hsl(120, 100%, 70%); }
+ 42% { color: hsl(180, 100%, 70%); }
+ 57% { color: hsl(240, 100%, 70%); }
+ 71% { color: hsl(300, 100%, 70%); }
+ 85% { color: hsl(360, 100%, 70%); }
+ 100% { color: hsl(0, 100%, 70%); }
+}
+
+/* Apply light mode colors by default */
+.rainbow-text {
+ font-size: 200%;
+ animation: rainbow-light 7s linear infinite;
+ animation-delay: calc(.07s * var(--i));
+}
+
+/* Apply dark mode colors if user prefers dark mode */
+@media (prefers-color-scheme: dark) {
+ .rainbow-text {
+ animation: rainbow-dark 7s linear infinite;
+ animation-delay: calc(.07s * var(--i));
+ }
+}
+
+
+/*!* Rainbow colors for each letter *!*/
+/*!* Rainbow colors for each letter *!*/
+/*.rainbow0 { color: red; background-color: cyan; mix-blend-mode: difference; }*/
+/*.rainbow1 { color: orange; background-color: blue; mix-blend-mode: difference; }*/
+/*.rainbow2 { color: yellow; background-color: purple; mix-blend-mode: difference; }*/
+/*.rainbow3 { color: green; background-color: magenta; mix-blend-mode: difference; }*/
+/*.rainbow4 { color: blue; background-color: orange; mix-blend-mode: difference; }*/
+/*.rainbow5 { color: indigo; background-color: yellow; mix-blend-mode: difference; }*/
+/*.rainbow6 { color: violet; background-color: green; mix-blend-mode: difference; }*/
diff --git a/examples/rainbow_tweet/example_call.bash b/examples/rainbow_tweet/example_call.bash
new file mode 100644
index 0000000..9204f0b
--- /dev/null
+++ b/examples/rainbow_tweet/example_call.bash
@@ -0,0 +1 @@
+curl -X 'POST' 'https://gptdeploy-02e02e4150.wolf.jina.ai/post' -H 'accept: application/json' -H 'Content-Type: application/json' -d '{"data": [{"text": "{\"tweet\":\"today is a bad day i dont like it\"}"}]}'
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/__init__.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/Dockerfile b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/Dockerfile
new file mode 100644
index 0000000..c752b8e
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/Dockerfile
@@ -0,0 +1,29 @@
+FROM jinaai/dev-gpt:0.0.6
+
+
+
+RUN apt-get install --no-install-recommends -y
+
+
+
+## install requirements for the executor
+
+COPY requirements.txt .
+
+RUN pip -v install --compile -r requirements.txt
+
+
+
+# setup the workspace
+
+COPY . /workdir/
+
+WORKDIR /workdir
+
+
+
+RUN pytest test_microservice.py
+
+
+
+ENTRYPOINT ["jina", "executor", "--uses", "config.yml"]
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/__init__.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/__init__.py
new file mode 100644
index 0000000..8a4eb04
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/__init__.py
@@ -0,0 +1,15 @@
+from jina import Executor, requests as jina_requests, DocumentArray
+import json
+
+from .microservice import func
+
+
+class PositiveTweetModifierExecutor3163055(Executor):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ @jina_requests()
+ def endpoint(self, docs: DocumentArray, **kwargs) -> DocumentArray:
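+        # Each Document's text holds the JSON input string; func returns the JSON output string.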
+ for d in docs:
+ d.text = func(d.text)
+ return docs
diff --git a/dev_gpt/options/generate/static_files/microservice/apis.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/apis.py
similarity index 100%
rename from dev_gpt/options/generate/static_files/microservice/apis.py
rename to examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/apis.py
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/config.yml b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/config.yml
new file mode 100644
index 0000000..36e015e
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/config.yml
@@ -0,0 +1,5 @@
+jtype: PositiveTweetModifierExecutor3163055
+py_modules:
+ - __init__.py
+metas:
+ name: PositiveTweetModifierExecutor3163055
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/microservice.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/microservice.py
new file mode 100644
index 0000000..8d0b8d1
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/microservice.py
@@ -0,0 +1,37 @@
+# This microservice receives an API key for OpenAI (OPENAI_API_KEY) and a tweet containing potentially passive aggressive language as input.
+# It analyzes the input tweet using the OpenAI API to identify passive aggressive language and modifies the language to make it more positive without changing the meaning.
+# The microservice then returns the updated, positive version of the tweet as output.
+
+from .apis import GPT_3_5_Turbo
+import json
+
+
+def func(input_json: str) -> str:
+ # Parse the input JSON string
+ input_data = json.loads(input_json)
+
+ # Extract the OpenAI API key and tweet from the input data
+ openai_api_key = input_data["OPENAI_API_KEY"]
+ tweet = input_data["tweet"]
+
+ # Initialize the GPT-3.5 Turbo API
+ gpt_3_5_turbo = GPT_3_5_Turbo(
+ system=f'''
+You are an AI language model that can modify tweets to make them more positive without changing their meaning.
+When you receive a tweet, you will return a JSON object containing the updated, positive version of the tweet.
+Example:
+Input tweet: "I can't believe you did that. It's so typical of you."
+Output JSON: {{"positive_tweet": "I'm surprised you did that. It's just like you!"}}
+''')
+
+ # Generate the prompt for the GPT-3.5 Turbo API
+ prompt = f"Input tweet: {tweet}"
+
+ # Call the GPT-3.5 Turbo API with the prompt
+ generated_string = gpt_3_5_turbo(prompt)
+
+ # Parse the generated JSON string
+ output_data = json.loads(generated_string)
+
+ # Return the output JSON string
+ return json.dumps(output_data)
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/requirements.txt b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/requirements.txt
new file mode 100644
index 0000000..054deb5
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/requirements.txt
@@ -0,0 +1,4 @@
+jina==3.15.1.dev14
+docarray==0.21.0
+openai==0.27.5
+pytest
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/test_microservice.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/test_microservice.py
new file mode 100644
index 0000000..f350248
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v1/test_microservice.py
@@ -0,0 +1,22 @@
+# This test case checks if the output of the microservice is of type 'str' for the positive_tweet property.
+# Since the output of the GPT-3.5 Turbo model is not deterministic, we cannot check for the exact output.
+# Instead, we will test if the output is a valid JSON string and if the 'positive_tweet' property is a string.
+
+from .microservice import func
+import json
+
+def test_positive_tweet_type():
+ # Define the input JSON string
+ input_json = json.dumps({
+ "OPENAI_API_KEY": "",
+ "tweet": "I can't believe you did that. It's so typical of you."
+ })
+
+ # Call the microservice function with the input JSON string
+ output_json = func(input_json)
+
+ # Parse the output JSON string
+ output_data = json.loads(output_json)
+
+ # Check if the 'positive_tweet' property is a string
+ assert isinstance(output_data["positive_tweet"], str)
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/Dockerfile b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/Dockerfile
new file mode 100644
index 0000000..c752b8e
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/Dockerfile
@@ -0,0 +1,29 @@
+FROM jinaai/dev-gpt:0.0.6
+
+
+
+RUN apt-get install --no-install-recommends -y
+
+
+
+## install requirements for the executor
+
+COPY requirements.txt .
+
+RUN pip -v install --compile -r requirements.txt
+
+
+
+# setup the workspace
+
+COPY . /workdir/
+
+WORKDIR /workdir
+
+
+
+RUN pytest test_microservice.py
+
+
+
+ENTRYPOINT ["jina", "executor", "--uses", "config.yml"]
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/__init__.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/__init__.py
new file mode 100644
index 0000000..8a4eb04
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/__init__.py
@@ -0,0 +1,15 @@
+from jina import Executor, requests as jina_requests, DocumentArray
+import json
+
+from .microservice import func
+
+
+class PositiveTweetModifierExecutor3163055(Executor):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ @jina_requests()
+ def endpoint(self, docs: DocumentArray, **kwargs) -> DocumentArray:
+ for d in docs:
+ d.text = func(d.text)
+ return docs
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/apis.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/apis.py
new file mode 100644
index 0000000..24dcb01
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/apis.py
@@ -0,0 +1,23 @@
+import os
+import openai
+
+
+openai.api_key = os.getenv("OPENAI_API_KEY")
+
+
+class GPT_3_5_Turbo:
+ def __init__(self, system: str = ''):
+ self.system = system
+
+ def __call__(self, prompt: str) -> str:
+ response = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ messages=[{
+ "role": 'system',
+ "content": self.system
+ }, {
+ "role": 'user',
+ "content": prompt
+ }]
+ )
+ return response.choices[0]['message']['content']
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/config.yml b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/config.yml
new file mode 100644
index 0000000..36e015e
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/config.yml
@@ -0,0 +1,5 @@
+jtype: PositiveTweetModifierExecutor3163055
+py_modules:
+ - __init__.py
+metas:
+ name: PositiveTweetModifierExecutor3163055
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/microservice.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/microservice.py
new file mode 100644
index 0000000..06bd4a0
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/microservice.py
@@ -0,0 +1,41 @@
+# This microservice receives an API key for OpenAI (OPENAI_API_KEY) and a tweet containing potentially passive aggressive language as input.
+# It analyzes the input tweet using the OpenAI API to identify passive aggressive language and modifies the language to make it more positive without changing the meaning.
+# The microservice then returns the updated, positive version of the tweet as output.
+
+from .apis import GPT_3_5_Turbo
+import json
+
+
+def func(input_json: str) -> str:
+ # Parse the input JSON string
+ input_data = json.loads(input_json)
+
+ # Extract the OpenAI API key and tweet from the input data
+ openai_api_key = input_data["OPENAI_API_KEY"]
+ tweet = input_data["tweet"]
+
+ # Initialize the GPT-3.5 Turbo API
+ gpt_3_5_turbo = GPT_3_5_Turbo(
+ system=f'''
+You are an AI language model that can modify tweets to make them more positive without changing their meaning.
+When you receive a tweet, you will return a JSON object containing the updated, positive version of the tweet.
+Example:
+Input tweet: "I can't believe you did that. It's so typical of you."
+Output JSON: {{"positive_tweet": "I'm surprised you did that. It's just like you!"}}
+''')
+
+ # Generate the prompt for the GPT-3.5 Turbo API
+ prompt = f"Input tweet: {tweet}"
+
+ # Call the GPT-3.5 Turbo API with the prompt
+ generated_string = gpt_3_5_turbo(prompt)
+
+ # Check if the generated_string is a valid JSON string
+ try:
+ output_data = json.loads(generated_string)
+ except json.JSONDecodeError:
+ # If the generated_string is not a valid JSON string, return an error message
+ return json.dumps({"error": "Invalid JSON string generated by the GPT-3.5 Turbo API"})
+
+ # Return the output JSON string
+ return json.dumps(output_data)
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/requirements.txt b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/requirements.txt
new file mode 100644
index 0000000..054deb5
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/requirements.txt
@@ -0,0 +1,4 @@
+jina==3.15.1.dev14
+docarray==0.21.0
+openai==0.27.5
+pytest
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/test_microservice.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/test_microservice.py
new file mode 100644
index 0000000..f350248
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v2/test_microservice.py
@@ -0,0 +1,22 @@
+# This test case checks if the output of the microservice is of type 'str' for the positive_tweet property.
+# Since the output of the GPT-3.5 Turbo model is not deterministic, we cannot check for the exact output.
+# Instead, we will test if the output is a valid JSON string and if the 'positive_tweet' property is a string.
+
+from .microservice import func
+import json
+
+def test_positive_tweet_type():
+ # Define the input JSON string
+ input_json = json.dumps({
+ "OPENAI_API_KEY": "",
+ "tweet": "I can't believe you did that. It's so typical of you."
+ })
+
+ # Call the microservice function with the input JSON string
+ output_json = func(input_json)
+
+ # Parse the output JSON string
+ output_data = json.loads(output_json)
+
+ # Check if the 'positive_tweet' property is a string
+ assert isinstance(output_data["positive_tweet"], str)
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/Dockerfile b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/Dockerfile
new file mode 100644
index 0000000..c752b8e
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/Dockerfile
@@ -0,0 +1,29 @@
+FROM jinaai/dev-gpt:0.0.6
+
+
+
+RUN apt-get install --no-install-recommends -y
+
+
+
+## install requirements for the executor
+
+COPY requirements.txt .
+
+RUN pip -v install --compile -r requirements.txt
+
+
+
+# setup the workspace
+
+COPY . /workdir/
+
+WORKDIR /workdir
+
+
+
+RUN pytest test_microservice.py
+
+
+
+ENTRYPOINT ["jina", "executor", "--uses", "config.yml"]
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/__init__.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/__init__.py
new file mode 100644
index 0000000..8a4eb04
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/__init__.py
@@ -0,0 +1,15 @@
+from jina import Executor, requests as jina_requests, DocumentArray
+import json
+
+from .microservice import func
+
+
+class PositiveTweetModifierExecutor3163055(Executor):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+
+ @jina_requests()
+ def endpoint(self, docs: DocumentArray, **kwargs) -> DocumentArray:
+ for d in docs:
+ d.text = func(d.text)
+ return docs
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/apis.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/apis.py
new file mode 100644
index 0000000..89a0768
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/apis.py
@@ -0,0 +1,23 @@
+import os
+import openai
+
+
+
+
+
+class GPT_3_5_Turbo:
+ def __init__(self, system: str = ''):
+ self.system = system
+
+ def __call__(self, prompt: str) -> str:
+ response = openai.ChatCompletion.create(
+ model="gpt-3.5-turbo",
+ messages=[{
+ "role": 'system',
+ "content": self.system
+ }, {
+ "role": 'user',
+ "content": prompt
+ }]
+ )
+ return response.choices[0]['message']['content']
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/config.yml b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/config.yml
new file mode 100644
index 0000000..36e015e
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/config.yml
@@ -0,0 +1,5 @@
+jtype: PositiveTweetModifierExecutor3163055
+py_modules:
+ - __init__.py
+metas:
+ name: PositiveTweetModifierExecutor3163055
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/flow.yml b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/flow.yml
new file mode 100644
index 0000000..0eda86a
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/flow.yml
@@ -0,0 +1,20 @@
+jtype: Flow
+with:
+ port: 8080
+ protocol: http
+jcloud:
+ version: 3.15.1.dev14
+ labels:
+ creator: microchain
+ name: gptdeploy
+gateway:
+ uses: jinaai+docker://auth0-unified-448f11965ce142b6/GatewayPositiveTweetModifierExecutor3163055:latest
+
+executors:
+ - name: positivetweetmodifierexecutor3163055
+ uses: jinaai+docker://auth0-unified-448f11965ce142b6/PositiveTweetModifierExecutor3163055:latest
+
+ jcloud:
+ resources:
+ instance: C2
+ capacity: spot
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/Dockerfile b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/Dockerfile
new file mode 100644
index 0000000..660b5d7
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/Dockerfile
@@ -0,0 +1,14 @@
+FROM jinaai/jina:3.15.1-dev14-py39-standard
+
+
+RUN apt-get update && apt-get install --no-install-recommends -y git pip nginx && rm -rf /var/lib/apt/lists/*
+
+## install requirements for the executor
+COPY requirements.txt .
+RUN pip install --compile -r requirements.txt
+
+# setup the workspace
+COPY . /workdir/
+WORKDIR /workdir
+
+ENTRYPOINT ["jina", "gateway", "--uses", "config.yml"]
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/__init__.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/app.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/app.py
new file mode 100644
index 0000000..bb2c11e
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/app.py
@@ -0,0 +1,58 @@
+import json
+import os
+import streamlit as st
+from jina import Client, Document, DocumentArray
+
+# Set the favicon and title
+st.set_page_config(
+ page_title="Positive Tweet Modifier",
+ page_icon=":smiley:",
+ layout="wide",
+)
+
+# Define the input dictionary
+INPUT_DICTIONARY = {
+ "OPENAI_API_KEY": "",
+ "tweet": "I can't believe you did that. It's so typical of you.",
+}
+
+# Define the function to send a request to the microservice
+def send_request(input_dict):
+ client = Client(host='http://localhost:8080')
+ d = Document(text=json.dumps(input_dict))
+ response = client.post('/', inputs=DocumentArray([d]))
+ return response[0].text
+
+# Create the UI
+st.title("Positive Tweet Modifier :speech_balloon:")
+st.write("Transform negative tweets into positive ones using GPT-3.5 Turbo! :sunglasses:")
+
+# Input form
+st.subheader("Input")
+tweet = st.text_area("Enter a negative tweet:", value=INPUT_DICTIONARY["tweet"], height=100)
+api_key = st.text_input("Enter your OPENAI_API_KEY:", value=INPUT_DICTIONARY["OPENAI_API_KEY"])
+
+# Send request button
+if st.button("Transform Tweet"):
+ INPUT_DICTIONARY["tweet"] = tweet
+ INPUT_DICTIONARY["OPENAI_API_KEY"] = api_key
+ response_text = send_request(INPUT_DICTIONARY)
+ response_data = json.loads(response_text)
+
+ # Display the result
+ st.subheader("Result")
+ st.write(f"Positive Tweet: {response_data['positive_tweet']} :thumbsup:")
+
+# Deploy your own microservice
+st.markdown(
+ "Want to deploy your own microservice? [Click here!](https://github.com/jina-ai/dev-gpt)"
+)
+
+# Display the curl command
+deployment_id = os.environ.get("K8S_NAMESPACE_NAME", "")
+host = f'https://dev-gpt-{deployment_id.split("-")[1]}.wolf.jina.ai/post' if deployment_id else "http://localhost:8080/post"
+with st.expander("See curl command"):
+ st.code(
+ f'curl -X \'POST\' \'{host}\' -H \'accept: application/json\' -H \'Content-Type: application/json\' -d \'{{"data": [{{"text": "hello, world!"}}]}}\'',
+ language='bash'
+ )
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/app_config.toml b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/app_config.toml
new file mode 100644
index 0000000..24ef3ce
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/app_config.toml
@@ -0,0 +1,4 @@
+[server]
+
+baseUrlPath = "/playground"
+headless = true
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/config.yml b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/config.yml
new file mode 100644
index 0000000..5357216
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/config.yml
@@ -0,0 +1,5 @@
+jtype: GatewayPositiveTweetModifierExecutor3163055
+py_modules:
+ - custom_gateway.py
+metas:
+ name: GatewayPositiveTweetModifierExecutor3163055
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/custom_gateway.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/custom_gateway.py
new file mode 100644
index 0000000..d6292f7
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/custom_gateway.py
@@ -0,0 +1,154 @@
+import os
+import shutil
+import subprocess
+from time import sleep
+from typing import List, Tuple
+
+import streamlit.web.bootstrap
+from jina import Gateway
+from jina.serve.runtimes.gateway.composite import CompositeGateway
+from streamlit.file_util import get_streamlit_file_path
+from streamlit.web.server import Server as StreamlitServer
+
+
+cur_dir = os.path.dirname(__file__)
+
+
+def cmd(command, std_output=False, wait=True):
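+    # Run a shell command; by default capture stdout/stderr and block until it exits.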
+ if isinstance(command, str):
+ command = command.split()
+ if not std_output:
+ process = subprocess.Popen(
+ command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ )
+ else:
+ process = subprocess.Popen(command)
+ if wait:
+ output, error = process.communicate()
+ return output, error
+
+
+class PlaygroundGateway(Gateway):
+ def __init__(self, **kwargs):
+ super().__init__(**kwargs)
+ self.streamlit_script = 'app.py'
+ # copy playground/config.toml to streamlit config.toml
+ streamlit_config_toml_src = os.path.join(cur_dir, 'app_config.toml')
+ streamlit_config_toml_dest = get_streamlit_file_path("config.toml")
+ # create streamlit_config_toml_dest if it doesn't exist
+ os.makedirs(os.path.dirname(streamlit_config_toml_dest), exist_ok=True)
+ shutil.copyfile(streamlit_config_toml_src, streamlit_config_toml_dest)
+
+ async def setup_server(self):
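+        # Replicate the bootstrap steps that `streamlit run` would normally perform
+        # before starting the Streamlit server programmatically.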
+ streamlit.web.bootstrap._fix_sys_path(self.streamlit_script)
+ streamlit.web.bootstrap._fix_matplotlib_crash()
+ streamlit.web.bootstrap._fix_tornado_crash()
+ streamlit.web.bootstrap._fix_sys_argv(self.streamlit_script, ())
+ streamlit.web.bootstrap._fix_pydeck_mapbox_api_warning()
+ streamlit_cmd = f'streamlit run {self.streamlit_script}'
+
+ self.streamlit_server = StreamlitServer(
+ os.path.join(cur_dir, self.streamlit_script), streamlit_cmd
+ )
+
+ async def run_server(self):
+ await self.streamlit_server.start()
+ streamlit.web.bootstrap._on_server_start(self.streamlit_server)
+ streamlit.web.bootstrap._set_up_signal_handler(self.streamlit_server)
+
+ async def shutdown(self):
+ self.streamlit_server.stop()
+
+
+class GatewayPositiveTweetModifierExecutor3163055(CompositeGateway):
+ """The CustomGateway assumes that the gateway has been started with http on port 8080.
+ This is the port on which the nginx process listens. After nginx has been started,
+ it will start the playground on port 8501 and the actual HTTP gateway will start on port 8082.
+
+ Nginx is configured to route the requests in the following way:
+ - /playground -> playground on port 8501
+ - / -> HTTP gateway on port 8082
+ """
+
+ def __init__(self, **kwargs):
+ # need to update port to 8082, as nginx will listen on 8080
+ http_idx = 0
+ http_port = kwargs['runtime_args']['port'][http_idx]
+ if kwargs['runtime_args']['port'][http_idx] != 8080:
+ raise ValueError(
+ f'Please, let http port ({http_port}) be 8080 for nginx to work'
+ )
+ kwargs['runtime_args']['port'][http_idx] = 8082
+ kwargs['cors'] = True
+ super().__init__(**kwargs)
+
+ # remove potential clashing arguments from kwargs
+ kwargs.pop("port", None)
+ kwargs.pop("protocol", None)
+
+ # note order is important
+ self._add_gateway(
+ PlaygroundGateway,
+ 8501,
+ **kwargs,
+ )
+
+ self.setup_nginx()
+ self.nginx_was_shutdown = False
+
+ async def shutdown(self):
+ await super().shutdown()
+ if not self.nginx_was_shutdown:
+ self.shutdown_nginx()
+ self.nginx_was_shutdown = True
+
+ def setup_nginx(self):
+ command = [
+ 'nginx',
+ '-c',
+ os.path.join(cur_dir, '', 'nginx.conf'),
+ ]
+ output, error = self._run_nginx_command(command)
+ self.logger.info('Nginx started')
+ self.logger.info(f'nginx output: {output}')
+ self.logger.info(f'nginx error: {error}')
+
+ def shutdown_nginx(self):
+ command = ['nginx', '-s', 'stop']
+ output, error = self._run_nginx_command(command)
+ self.logger.info('Nginx stopped')
+ self.logger.info(f'nginx output: {output}')
+ self.logger.info(f'nginx error: {error}')
+
+ def _run_nginx_command(self, command: List[str]) -> Tuple[bytes, bytes]:
+ self.logger.info(f'Running command: {command}')
+ output, error = cmd(command)
+ if error != b'':
+ # on CI we need to use sudo; using NOW_CI_RUN isn't good if running test locally
+ self.logger.info(f'nginx error: {error}')
+ command.insert(0, 'sudo')
+ self.logger.info(f'So running command: {command}')
+ output, error = cmd(command)
+ sleep(10)
+ return output, error
+
+ def _add_gateway(self, gateway_cls, port, protocol='http', **kwargs):
+ # ignore metrics_registry since it is not copyable
+ runtime_args = self._deepcopy_with_ignore_attrs(
+ self.runtime_args,
+ [
+ 'metrics_registry',
+ 'tracer_provider',
+ 'grpc_tracing_server_interceptors',
+ 'aio_tracing_client_interceptors',
+ 'tracing_client_interceptor',
+ 'monitoring', # disable it for fastapi gateway
+ ],
+ )
+ runtime_args.port = [port]
+ runtime_args.protocol = [protocol]
+ gateway_kwargs = {k: v for k, v in kwargs.items() if k != 'runtime_args'}
+ gateway_kwargs['runtime_args'] = dict(vars(runtime_args))
+ gateway = gateway_cls(**gateway_kwargs)
+ gateway.streamer = self.streamer
+ self.gateways.insert(0, gateway)
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/nginx.conf b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/nginx.conf
new file mode 100644
index 0000000..e44f98d
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/nginx.conf
@@ -0,0 +1,62 @@
+events {
+ worker_connections 4096; ## Default: 1024
+}
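+
+# The routing below mirrors the CompositeGateway setup: nginx listens on 8080
+# and fans requests out to Streamlit (8501) and the HTTP gateway (8082).
+# As a sketch, the file can be syntax-checked locally with
+# `nginx -t -c <absolute path to this file>`.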
+
+http {
+ server {
+ listen 8080;
+ server_name localhost;
+
+
+ # from https://medium.com/@dasirra/using-streamlit-nginx-docker-to-build-and-put-in-production-dashboards-in-aws-lightsail-781dab8f2836
+ location ^~ /static {
+ proxy_pass http://localhost:8501/static/;
+ }
+ location ^~ /healthz {
+ proxy_pass http://localhost:8501/healthz;
+ }
+ location ^~ /vendor {
+ proxy_pass http://localhost:8501/vendor;
+ }
+ location ^~ /st-allowed-message-origins {
+ proxy_pass http://localhost:8501/st-allowed-message-origins;
+ }
+
+ # required for JCloud deployment: Streamlit streams updates to the browser over this websocket endpoint
+ location ^~ /stream {
+ # inspired from https://discuss.streamlit.io/t/how-to-use-streamlit-with-nginx/378/7
+ proxy_pass http://localhost:8501/stream;
+ proxy_http_version 1.1;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Host $host;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_read_timeout 86400;
+ }
+ location ^~ /favicon.png {
+ proxy_pass http://localhost:8501/favicon.png;
+ }
+ # needed for extra-streamlit-components to load
+ location ^~ /component {
+ proxy_pass http://localhost:8501/component;
+ }
+
+ location /playground {
+ # streamlit specific from https://discuss.streamlit.io/t/streamlit-docker-nginx-ssl-https/2195
+ proxy_http_version 1.1;
+ proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+ proxy_set_header Host $host;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_read_timeout 86400;
+ proxy_pass http://localhost:8501;
+ client_max_body_size 50M;
+ }
+
+ location / {
+ proxy_pass http://localhost:8082;
+ client_max_body_size 50M;
+ }
+
+ }
+}
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/requirements.txt b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/requirements.txt
new file mode 100644
index 0000000..a5ab956
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/gateway/requirements.txt
@@ -0,0 +1,4 @@
+streamlit==1.16.0
+altair==4.2.2
+extra-streamlit-components==0.1.55
+jina==3.15.1.dev14
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/microservice.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/microservice.py
new file mode 100644
index 0000000..a9cded0
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/microservice.py
@@ -0,0 +1,45 @@
+# This microservice receives an API key for OpenAI (OPENAI_API_KEY) and a tweet containing potentially passive-aggressive language as input.
+# It analyzes the input tweet using the OpenAI API to identify passive-aggressive language and rewrites it to be more positive without changing the meaning.
+# The microservice then returns the updated, positive version of the tweet as output.
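+#
+# Example round trip (a sketch; the exact wording of the rewritten tweet varies
+# between runs, since the model output is non-deterministic):
+#   input:  {"OPENAI_API_KEY": "sk-...", "tweet": "I can't believe you did that. It's so typical of you."}
+#   output: {"positive_tweet": "I'm surprised you did that. It's just like you!"}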
+import os
+
+import openai
+
+from .apis import GPT_3_5_Turbo
+import json
+
+
+def func(input_json: str) -> str:
+ # Parse the input JSON string
+ input_data = json.loads(input_json)
+
+ # Extract the OpenAI API key and tweet from the input data
+ openai.api_key = input_data["OPENAI_API_KEY"]
+ print('OpenAI API key updated')  # don't log the key itself
+ tweet = input_data["tweet"]
+
+ # Initialize the GPT-3.5 Turbo API
+ gpt_3_5_turbo = GPT_3_5_Turbo(
+ system=f'''
+You are an AI language model that can modify tweets to make them more positive without changing their meaning.
+When you receive a tweet, you will return a JSON object containing the updated, positive version of the tweet.
+Example:
+Input tweet: "I can't believe you did that. It's so typical of you."
+Output JSON: {{"positive_tweet": "I'm surprised you did that. It's just like you!"}}
+''')
+
+ # Generate the prompt for the GPT-3.5 Turbo API
+ prompt = f"Input tweet: {tweet}\nOutput JSON:"
+
+ # Call the GPT-3.5 Turbo API with the prompt
+ generated_string = gpt_3_5_turbo(prompt)
+
+ # Check if the generated_string is a valid JSON string
+ try:
+ output_data = json.loads(generated_string)
+ except json.JSONDecodeError:
+ # If the generated_string is not a valid JSON string, return an error message
+ return json.dumps({"error": "Invalid JSON string generated by the GPT-3.5 Turbo API"})
+
+ # Return the output JSON string
+ return json.dumps(output_data)
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/requirements.txt b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/requirements.txt
new file mode 100644
index 0000000..054deb5
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/requirements.txt
@@ -0,0 +1,4 @@
+jina==3.15.1.dev14
+docarray==0.21.0
+openai==0.27.5
+pytest
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/run_flow.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/run_flow.py
new file mode 100644
index 0000000..560e20c
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/run_flow.py
@@ -0,0 +1,5 @@
+from jina import Flow
+
+flow = Flow.load_config('flow.yml')
+with flow:
+ flow.block()
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/test_microservice.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/test_microservice.py
new file mode 100644
index 0000000..f350248
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/0_gpt_3_5_turbo/v3/test_microservice.py
@@ -0,0 +1,22 @@
+# This test case checks if the output of the microservice is of type 'str' for the positive_tweet property.
+# Since the output of the GPT-3.5 Turbo model is not deterministic, we cannot check for the exact output.
+# Instead, we will test if the output is a valid JSON string and if the 'positive_tweet' property is a string.
+
+from .microservice import func
+import json
+
+def test_positive_tweet_type():
+ # Define the input JSON string
+ input_json = json.dumps({
+ "OPENAI_API_KEY": "",
+ "tweet": "I can't believe you did that. It's so typical of you."
+ })
+
+ # Call the microservice function with the input JSON string
+ output_json = func(input_json)
+
+ # Parse the output JSON string
+ output_data = json.loads(output_json)
+
+ # Check if the 'positive_tweet' property is a string
+ assert isinstance(output_data["positive_tweet"], str)
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/__init__.py b/examples/rainbow_tweet/microservice/PositiveTweetModifierExecutor3163055/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/rainbow_tweet/microservice/__init__.py b/examples/rainbow_tweet/microservice/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/examples/rainbow_tweet/microservice/name.txt b/examples/rainbow_tweet/microservice/name.txt
new file mode 100644
index 0000000..87ac147
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/name.txt
@@ -0,0 +1 @@
+PositiveTweetModifierExecutor
\ No newline at end of file
diff --git a/examples/rainbow_tweet/microservice/strategies.json b/examples/rainbow_tweet/microservice/strategies.json
new file mode 100644
index 0000000..f2004cd
--- /dev/null
+++ b/examples/rainbow_tweet/microservice/strategies.json
@@ -0,0 +1,5 @@
+[
+ ["openai"],
+ ["openai", "transformers"],
+ ["openai", "textblob"]
+]
\ No newline at end of file
diff --git a/examples/rainbow_tweet/screenshot.png b/examples/rainbow_tweet/screenshot.png
new file mode 100644
index 0000000..767db0e
Binary files /dev/null and b/examples/rainbow_tweet/screenshot.png differ
diff --git a/test/integration/test_generator.py b/test/integration/test_generator.py
index e03f3ce..58d1b77 100644
--- a/test/integration/test_generator.py
+++ b/test/integration/test_generator.py
@@ -22,12 +22,12 @@ def test_generation_level_0(microservice_dir, mock_input_sequence):
generator = Generator(
"The microservice is very simple, it does not take anything as input and only outputs the word 'test'",
microservice_dir,
- 'gpt-3.5-turbo'
+ 'gpt-3.5-turbo',
+ self_healing=False,
)
assert generator.generate() == 0
-
@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
def test_generation_level_1(microservice_dir, mock_input_sequence):
"""
@@ -46,12 +46,14 @@ Example tweet:
\'When your coworker microwaves fish in the break room... AGAIN. 🐟🤢
But hey, at least SOMEONE's enjoying their lunch. #officelife\'''',
str(microservice_dir),
- 'gpt-3.5-turbo'
+ 'gpt-3.5-turbo',
+ # self_healing=False,
)
assert generator.generate() == 0
-@pytest.mark.parametrize('mock_input_sequence', [['y', 'https://www.africau.edu/images/default/sample.pdf']], indirect=True)
+@pytest.mark.parametrize('mock_input_sequence', [['y', 'https://www.africau.edu/images/default/sample.pdf']],
+ indirect=True)
def test_generation_level_2(microservice_dir, mock_input_sequence):
"""
Requirements:
@@ -66,10 +68,34 @@ def test_generation_level_2(microservice_dir, mock_input_sequence):
generator = Generator(
"The input is a PDF and the output the summarized text (50 words).",
str(microservice_dir),
- 'gpt-3.5-turbo'
+ 'gpt-3.5-turbo',
+ # self_healing=False,
)
assert generator.generate() == 0
+
+@pytest.mark.parametrize('mock_input_sequence', [
+ ['y', 'https://upload.wikimedia.org/wikipedia/commons/4/47/PNG_transparency_demonstration_1.png']], indirect=True)
+def test_generation_level_2_svg(microservice_dir, mock_input_sequence):
+ """
+ Requirements:
+ coding challenge: ✅
+ pip packages: ✅
+ environment: ❌
+ GPT-3.5-turbo: ❌
+ APIs: ❌
+ Databases: ❌
+ """
+ os.environ['VERBOSE'] = 'true'
+ generator = Generator(
+ "Get a png as input and return a vectorized version as svg.",
+ str(microservice_dir),
+ 'gpt-3.5-turbo',
+ # self_healing=False,
+ )
+ assert generator.generate() == 0
+
+
@pytest.mark.parametrize('mock_input_sequence', [['y', 'yfinance.Ticker("MSFT").info']], indirect=True)
def test_generation_level_3(microservice_dir, mock_input_sequence):
"""
@@ -91,10 +117,12 @@ def test_generation_level_3(microservice_dir, mock_input_sequence):
Example input: 'AAPL'
''',
str(microservice_dir),
- 'gpt-3.5-turbo'
+ 'gpt-3.5-turbo',
+ # self_healing=False,
)
assert generator.generate() == 0
+
@pytest.mark.parametrize(
'mock_input_sequence', [
[
@@ -135,11 +163,32 @@ def test_generation_level_4(microservice_dir, mock_input_sequence):
4. Return the audio file as base64 encoded binary.
''',
str(microservice_dir),
- 'gpt-4'
+ 'gpt-4',
+ # self_healing=False,
)
assert generator.generate() == 0
-@pytest.mark.parametrize('mock_input_sequence', [['y', 'https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/560px-PNG_transparency_demonstration_1.png']], indirect=True)
+
+@pytest.mark.parametrize('mock_input_sequence', [['y']], indirect=True)
+def test_generation_level_5_company_logos(microservice_dir, mock_input_sequence):
+ os.environ['VERBOSE'] = 'true'
+ generator = Generator(
+ f'''\
+Given a list of email addresses, get all company names from them.
+For all companies, get the company logo.
+All logos need to be arranged on a square.
+The square is returned as png.
+''',
+ str(microservice_dir),
+ 'gpt-3.5-turbo',
+ # self_healing=False,
+ )
+ assert generator.generate() == 0
+
+
+@pytest.mark.parametrize('mock_input_sequence', [['y',
+ 'https://upload.wikimedia.org/wikipedia/commons/thumb/4/47/PNG_transparency_demonstration_1.png/560px-PNG_transparency_demonstration_1.png']],
+ indirect=True)
def test_generation_level_5(microservice_dir, mock_input_sequence):
"""
Requirements:
@@ -151,7 +200,8 @@ def test_generation_level_5(microservice_dir, mock_input_sequence):
Databases: ❌
"""
os.environ['VERBOSE'] = 'true'
- generator = Generator(f'''
+ generator = Generator(
+ f'''
The input is an image.
Use the following api to get the description of the image:
Request:
@@ -173,9 +223,10 @@ The description is then used to generate a joke.
The joke is then put on the image.
The output is the image with the joke on it.
''',
- str(microservice_dir),
- 'gpt-3.5-turbo'
- )
+ str(microservice_dir),
+ 'gpt-3.5-turbo',
+ # self_healing=False,
+ )
assert generator.generate() == 0
# @pytest.fixture
diff --git a/test/unit/test_search.py b/test/unit/test_search.py
new file mode 100644
index 0000000..fdd38fb
--- /dev/null
+++ b/test/unit/test_search.py
@@ -0,0 +1,13 @@
+from dev_gpt.options.generate.static_files.microservice.google_custom_search import search_web, search_images
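+# Note: these tests call the live Google Custom Search API and therefore assume
+# that GOOGLE_API_KEY and GOOGLE_CSE_ID are set in the environment (see the
+# README / CI workflow).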
+
+
+def test_web_search():
+ results = search_web("jina", 10)
+ assert len(results) == 10
+ assert "jina" in results[0].lower()
+ assert not results[0].startswith("http")
+
+def test_image_search():
+ results = search_images("jina", 10)
+ assert len(results) == 10
+ assert results[0].startswith("http")
diff --git a/test/unit/test_tools.py b/test/unit/test_tools.py
new file mode 100644
index 0000000..692c9c8
--- /dev/null
+++ b/test/unit/test_tools.py
@@ -0,0 +1,13 @@
+import os
+
+from dev_gpt.options.generate.tools.tools import get_available_tools
+
+
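+# get_available_tools() appears to return one available tool per line; the
+# search tool is only offered when GOOGLE_API_KEY is set, which is what these
+# two tests pin down.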
+def test_all_tools():
+ tool_lines = get_available_tools().split('\n')
+ assert len(tool_lines) == 2
+
+def test_no_search():
+ os.environ['GOOGLE_API_KEY'] = ''
+ tool_lines = get_available_tools().split('\n')
+ assert len(tool_lines) == 1
\ No newline at end of file