Merge pull request #69 from jina-ai/feat_pm_role

👩‍💼👨‍💼 feat: pm role
This commit is contained in:
Florian Hönicke
2023-04-28 19:56:13 +02:00
committed by GitHub
13 changed files with 443 additions and 81 deletions

33
.github/workflows/ci.yml vendored Normal file
View File

@@ -0,0 +1,33 @@
name: CI
on:
workflow_dispatch:
pull_request:
jobs:
test:
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
group: [1, 2]
steps:
- uses: actions/checkout@v2
- name: Set up Python 3.8
uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Prepare environment
run: |
python -m pip install --upgrade pip
python -m pip install wheel
pip install --no-cache-dir ".[full,test]"
pip install pytest
pip install pytest-split
- name: Test
id: test
run: |
pytest -v -s -m "not gpu" --splits 2 --group ${{ matrix.group }} --splitting-algorithm least_duration test/
timeout-minutes: 20
env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
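The matrix above fans the suite out across two groups; pytest-split assigns tests to groups using the recorded durations in test/.test_durations. A minimal sketch of what a least_duration-style split does conceptually (the greedy assignment below is an assumption for illustration, not pytest-split's actual code):

```python
from typing import Dict, List

def split_least_duration(durations: Dict[str, float], num_groups: int) -> List[List[str]]:
    """Greedily assign the slowest tests first to the group with the least accumulated time."""
    groups: List[List[str]] = [[] for _ in range(num_groups)]
    totals = [0.0] * num_groups
    for test, duration in sorted(durations.items(), key=lambda kv: kv[1], reverse=True):
        idx = totals.index(min(totals))  # the currently fastest group
        groups[idx].append(test)
        totals[idx] += duration
    return groups

# With the durations file added in this PR and two groups:
print(split_least_duration({'test/test_generator.py::test_generator': 246.23}, 2))
```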

4
.gitignore vendored
View File

@@ -1,7 +1,7 @@
/microservice/
.env
config.yml
executor
data
build
gptdeploy.egg-info
dist

MANIFEST.in
View File

@@ -1,3 +1,3 @@
include requirements.txt
include gptdeploy.cmd
recursive-include src/options/generate/static_files/gateway *.toml *.cmd *.conf *.txt Dockerfile
recursive-include src/options/generate/static_files/ *

2
requirements-test.txt Normal file
View File

@@ -0,0 +1,2 @@
pytest
pytest-split

src/apis/gpt.py
View File

@@ -1,4 +1,5 @@
import os
from copy import deepcopy
from time import sleep
from typing import List, Any
@@ -8,7 +9,7 @@ from langchain import PromptTemplate
from langchain.callbacks import CallbackManager
from langchain.chat_models import ChatOpenAI
from openai.error import RateLimitError
from langchain.schema import HumanMessage, SystemMessage, BaseMessage
from langchain.schema import HumanMessage, SystemMessage, BaseMessage, AIMessage
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from requests.exceptions import ConnectionError
from urllib3.exceptions import InvalidChunkLength
@@ -47,13 +48,13 @@ class GPTSession:
self.chars_prompt_so_far = 0
self.chars_generation_so_far = 0
def get_conversation(self, system_definition_examples: List[str] = ['gpt', 'executor', 'docarray', 'client']):
def get_conversation(self, messages: List[BaseMessage] = [], print_stream: bool = True, print_costs: bool = True):
messages = deepcopy(messages)
return _GPTConversation(
self.model_name, self.cost_callback, self.task_description, self.test_description, system_definition_examples
self.model_name, self.cost_callback, messages, print_stream, print_costs
)
@staticmethod
def is_gpt4_available():
try:
@@ -74,14 +75,15 @@ class GPTSession:
except openai.error.InvalidRequestError:
return False
def cost_callback(self, chars_prompt, chars_generation):
def cost_callback(self, chars_prompt, chars_generation, print_costs: bool = True):
self.chars_prompt_so_far += chars_prompt
self.chars_generation_so_far += chars_generation
print('\n')
money_prompt = self._calculate_money_spent(self.chars_prompt_so_far, self.pricing_prompt)
money_generation = self._calculate_money_spent(self.chars_generation_so_far, self.pricing_generation)
print('Total money spent so far on openai.com:', f'${money_prompt + money_generation:.3f}')
print('\n')
if print_costs:
print('\n')
money_prompt = self._calculate_money_spent(self.chars_prompt_so_far, self.pricing_prompt)
money_generation = self._calculate_money_spent(self.chars_generation_so_far, self.pricing_generation)
print('Total money spent so far on openai.com:', f'${money_prompt + money_generation:.3f}')
print('\n')
@staticmethod
def _calculate_money_spent(num_chars, price):
@@ -95,31 +97,39 @@ class AssistantStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
class _GPTConversation:
def __init__(self, model: str, cost_callback, task_description, test_description, system_definition_examples: List[str] = ['executor', 'docarray', 'client']):
def __init__(self, model: str, cost_callback, messages: List[BaseMessage], print_stream, print_costs):
self._chat = ChatOpenAI(
model_name=model,
streaming=True,
callback_manager=CallbackManager([AssistantStreamingStdOutCallbackHandler()]),
callback_manager=CallbackManager([AssistantStreamingStdOutCallbackHandler()] if print_stream else []),
verbose=True,
temperature=0,
)
self.cost_callback = cost_callback
self.messages: List[BaseMessage] = []
self.system_message = self._create_system_message(task_description, test_description, system_definition_examples)
if os.environ['VERBOSE'].lower() == 'true' and self.system_message is not None:
print_colored('system', self.system_message.content, 'magenta')
self.messages = messages
self.print_stream = print_stream
self.print_costs = print_costs
for message in messages:
if os.environ['VERBOSE'].lower() == 'true':
if isinstance(message, SystemMessage):
print_colored('system - prompt', message.content, 'magenta')
elif isinstance(message, HumanMessage):
print_colored('user - prompt', message.content, 'blue')
elif isinstance(message, AIMessage):
print_colored('assistant - prompt', message.content, 'green')
def chat(self, prompt: str):
chat_message = HumanMessage(content=prompt)
def chat(self, prompt: str, role: str = 'user'):
MessageClass = HumanMessage if role == 'user' else SystemMessage
chat_message = MessageClass(content=prompt)
self.messages.append(chat_message)
if os.environ['VERBOSE'].lower() == 'true':
print_colored('user', prompt, 'blue')
print_colored('assistant', '', 'green', end='')
color = 'blue' if role == 'user' else 'magenta'
print_colored(role, prompt, color)
if self.print_stream:
print_colored('assistant', '', 'green', end='')
for i in range(10):
try:
response = self._chat(
[self.system_message] + self.messages if self.system_message is not None else self.messages
)
response = self._chat(self.messages)
break
except (ConnectionError, InvalidChunkLength) as e:
print('There was a connection error. Retrying...')
@@ -129,7 +139,7 @@ class _GPTConversation:
if os.environ['VERBOSE'].lower() == 'true':
print()
self.cost_callback(sum([len(m.content) for m in self.messages]), len(response.content))
self.cost_callback(sum([len(m.content) for m in self.messages]), len(response.content), self.print_costs)
self.messages.append(response)
return response.content
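With the reworked signature, callers seed a conversation with explicit messages instead of passing system_definition_examples. A minimal usage sketch (the constructor arguments are taken from elsewhere in this diff; the message content is hypothetical):

```python
from langchain.schema import SystemMessage

from src.apis.gpt import GPTSession

session = GPTSession('task description', 'test description', model='gpt-3.5-turbo')
conversation = session.get_conversation(
    messages=[SystemMessage(content='You are a product manager refining requirements.')],
    print_stream=False,  # suppress token-by-token streaming to stdout
    print_costs=False,   # suppress the running cost summary
)
answer = conversation.chat('What should the microservice do?', role='user')
```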

View File

@@ -50,8 +50,8 @@ def main(ctx):
@openai_api_key_needed
@main.command()
@click.option('--description', required=True, help='Description of the microservice.')
@click.option('--test', required=True, help='Test scenario for the microservice.')
@click.option('--description', required=False, help='Description of the microservice.')
@click.option('--test', required=False, help='Test scenario for the microservice.')
@click.option('--model', default='gpt-4', help='GPT model to use (default: gpt-4).')
@click.option('--verbose', default=False, is_flag=True, help='Verbose mode.') # only for development
@path_param

src/options/generate/generator.py
View File

@@ -2,36 +2,49 @@ import os
import random
import re
import shutil
from typing import List, Callable, Union
from typing import Callable
from typing import List, Text, Optional
from langchain import PromptTemplate
from langchain.schema import SystemMessage, HumanMessage, AIMessage
from pydantic.dataclasses import dataclass
from src.apis import gpt
from src.apis.gpt import _GPTConversation
from src.apis.jina_cloud import process_error_message, push_executor, is_executor_in_hub
from src.constants import FILE_AND_TAG_PAIRS, NUM_IMPLEMENTATION_STRATEGIES, MAX_DEBUGGING_ITERATIONS, \
PROBLEMATIC_PACKAGES, EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_NAME, TEST_EXECUTOR_FILE_TAG, \
REQUIREMENTS_FILE_NAME, REQUIREMENTS_FILE_TAG, DOCKER_FILE_NAME, UNNECESSARY_PACKAGES, IMPLEMENTATION_FILE_NAME, \
IMPLEMENTATION_FILE_TAG
from src.options.generate.templates_system import system_task_iteration, system_task_introduction, system_test_iteration
from src.options.generate.templates_user import template_generate_microservice_name, \
template_generate_possible_packages, \
template_solve_code_issue, \
template_solve_pip_dependency_issue, template_is_dependency_issue, template_generate_playground, \
template_generate_function, template_generate_test, template_generate_requirements, \
template_chain_of_thought, template_summarize_error, \
template_generate_apt_get_install, template_solve_apt_get_dependency_issue
template_generate_apt_get_install, template_solve_apt_get_dependency_issue, template_refinement
from src.options.generate.ui import get_random_employee
from src.utils.io import persist_file, get_all_microservice_files_with_content, get_microservice_path
from src.utils.string_tools import print_colored
@dataclass
class TaskSpecification:
task: Optional[Text]
test: Optional[Text]
class Generator:
def __init__(self, task_description, test_description, path, model='gpt-4'):
self.gpt_session = gpt.GPTSession(task_description, test_description, model=model)
self.task_description = task_description
self.test_description = test_description
self.microservice_specification = TaskSpecification(task=task_description, test=test_description)
self.microservice_root_path = path
def extract_content_from_result(self, plain_text, file_name, match_single_block=False):
pattern = fr"^\*\*{file_name}\*\*\n```(?:\w+\n)?([\s\S]*?)\n```" # the \n at the end makes sure that ``` within the generated code is not matched
def extract_content_from_result(self, plain_text, file_name, match_single_block=False, can_contain_code_block=True):
optional_line_break = '\n' if can_contain_code_block else ''  # requiring a line break right before the closing ``` ensures that a ``` inside the generated code is not matched as the end of the block
pattern = fr"^\*\*{file_name}\*\*\n```(?:\w+\n)?([\s\S]*?){optional_line_break}```"
match = re.search(pattern, plain_text, re.MULTILINE)
if match:
return match.group(1).strip()
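The tightened pattern only demands a line break before the closing fence when the block may contain nested code fences. A standalone sketch of the match with can_contain_code_block=False, as used for prompt.txt/final.txt (the sample text is hypothetical; the fence string is built programmatically so the snippet stays self-contained):

```python
import re

fence = '`' * 3
plain_text = f'**final.txt**\n{fence}text\nThe user inserts a png and gets an svg as response.\n{fence}'

file_name = 'final.txt'
can_contain_code_block = False
optional_line_break = '\n' if can_contain_code_block else ''
pattern = fr"^\*\*{file_name}\*\*\n{fence}(?:\w+\n)?([\s\S]*?){optional_line_break}{fence}"
match = re.search(pattern, plain_text, re.MULTILINE)
if match:
    print(match.group(1).strip())  # -> The user inserts a png and gets an svg as response.
```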
@@ -98,7 +111,8 @@ metas:
parse_result_fn = self.get_default_parse_result_fn(file_name_s)
print_colored('', f'\n\n############# {section_title} #############', 'blue')
conversation = self.gpt_session.get_conversation(system_definition_examples=system_definition_examples)
system_introduction_message = _GPTConversation._create_system_message(self.microservice_specification.task, self.microservice_specification.test, system_definition_examples)
conversation = self.gpt_session.get_conversation(messages=[system_introduction_message])
template_kwargs = {k: v for k, v in template_kwargs.items() if k in template.input_variables}
if 'file_name' in template.input_variables and len(file_name_s) == 1:
template_kwargs['file_name'] = file_name_s[0]
@@ -136,8 +150,8 @@ metas:
section_title='Microservice',
template=template_generate_function,
destination_folder=MICROSERVICE_FOLDER_v1,
microservice_description=self.task_description,
test_description=self.test_description,
microservice_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
packages=packages,
file_name_purpose=IMPLEMENTATION_FILE_NAME,
tag_name=IMPLEMENTATION_FILE_TAG,
@@ -150,8 +164,8 @@ metas:
MICROSERVICE_FOLDER_v1,
code_files_wrapped=self.files_to_string({EXECUTOR_FILE_NAME: microservice_content}),
microservice_name=microservice_name,
microservice_description=self.task_description,
test_description=self.test_description,
microservice_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
file_name_purpose=TEST_EXECUTOR_FILE_NAME,
tag_name=TEST_EXECUTOR_FILE_TAG,
file_name_s=[TEST_EXECUTOR_FILE_NAME],
@@ -161,7 +175,6 @@ metas:
'Requirements',
template_generate_requirements,
MICROSERVICE_FOLDER_v1,
system_definition_examples=None,
code_files_wrapped=self.files_to_string({
IMPLEMENTATION_FILE_NAME: microservice_content,
TEST_EXECUTOR_FILE_NAME: test_microservice_content,
@@ -200,7 +213,7 @@ metas:
print_colored('', '\n\n############# Playground #############', 'blue')
file_name_to_content = get_all_microservice_files_with_content(microservice_path)
conversation = self.gpt_session.get_conversation(None)
conversation = self.gpt_session.get_conversation()
conversation.chat(
template_generate_playground.format(
code_files_wrapped=self.files_to_string(file_name_to_content, ['test_microservice.py']),
@@ -290,7 +303,7 @@ metas:
destination_folder=next_microservice_path,
file_name_s=None,
parse_result_fn=self.parse_result_fn_dockerfile,
system_definition_examples=None,
system_definition_examples=[],
summarized_error=summarized_error,
all_files_string=dock_req_string,
)
@@ -313,14 +326,17 @@ metas:
destination_folder=next_microservice_path,
file_name_s=[IMPLEMENTATION_FILE_NAME, TEST_EXECUTOR_FILE_NAME, REQUIREMENTS_FILE_NAME],
summarized_error=summarized_error,
task_description=self.task_description,
test_description=self.test_description,
task_description=self.microservice_specification.task,
test_description=self.microservice_specification.test,
all_files_string=self.files_to_string({key: val for key, val in file_name_to_content.items() if key != EXECUTOR_FILE_NAME}),
)
class MaxDebugTimeReachedException(BaseException):
pass
class TaskRefinementException(BaseException):
pass
def is_dependency_issue(self, summarized_error, dock_req_string: str, package_manager: str):
# a few heuristics to quickly jump ahead
if any([error_message in summarized_error for error_message in ['AttributeError', 'NameError', 'AssertionError']]):
@@ -329,14 +345,13 @@ metas:
return True
print_colored('', f'Is it a {package_manager} dependency issue?', 'blue')
conversation = self.gpt_session.get_conversation(None)
conversation = self.gpt_session.get_conversation()
answer = conversation.chat(
template_is_dependency_issue.format(summarized_error=summarized_error, all_files_string=dock_req_string).replace('PACKAGE_MANAGER', package_manager)
)
return 'yes' in answer.lower()
def generate_microservice_name(self, description):
print_colored('', '\n\n############# What should be the name of the Microservice? #############', 'blue')
name = self.generate_and_persist_file(
section_title='Generate microservice name',
template=template_generate_microservice_name,
@@ -354,7 +369,7 @@ metas:
destination_folder=self.microservice_root_path,
file_name_s=['packages.csv'],
system_definition_examples=[],
description=self.task_description
description=self.microservice_specification.task
)['packages.csv']
packages_list = [[pkg.strip().lower() for pkg in packages_string.split(',')] for packages_string in packages_csv_string.split('\n')]
packages_list = [
@@ -367,8 +382,9 @@ metas:
return packages_list
def generate(self):
self.refine_specification()
os.makedirs(self.microservice_root_path)
generated_name = self.generate_microservice_name(self.task_description)
generated_name = self.generate_microservice_name(self.microservice_specification.task)
microservice_name = f'{generated_name}{random.randint(0, 10_000_000)}'
packages_list = self.get_possible_packages()
for num_approach, packages in enumerate(packages_list):
@@ -392,7 +408,72 @@ gptdeploy deploy --path {self.microservice_root_path}
break
def summarize_error(self, error):
conversation = self.gpt_session.get_conversation(None)
conversation = self.gpt_session.get_conversation()
error_summary = conversation.chat(template_summarize_error.format(error=error))
return error_summary
def refine_specification(self):
pm = get_random_employee('pm')
print(f'{pm.emoji}👋 Hi, I\'m {pm.name}, a PM at Jina AI. I\'ll gather the requirements for our engineers.')
original_task = self.microservice_specification.task
while True:
try:
self.microservice_specification.test = None
if not original_task:
self.microservice_specification.task = self.get_user_input(pm, 'What should your microservice do?')
self.refine_requirements(pm, system_task_iteration, 'task')
self.refine_requirements(pm, system_test_iteration, 'test')
break
except self.TaskRefinementException as e:
print_colored('', f'{pm.emoji} Could not refine your requirements. Please try again...', 'red')
print(f'''
{pm.emoji} 👍 Great, I will hand over the following requirements to our engineers:
Description of the microservice:
{self.microservice_specification.task}
Test scenario:
{self.microservice_specification.test}
''')
def refine_requirements(self, pm, template_init, refinement_type):
user_input = self.microservice_specification.task
messages = [
SystemMessage(content=system_task_introduction + template_init),
]
num_parsing_tries = 0
while True:
conversation = self.gpt_session.get_conversation(messages, print_stream=os.environ['VERBOSE'].lower() == 'true', print_costs=False)
print('thinking...')
agent_response_raw = conversation.chat(
template_refinement.format(
user_input=user_input,
_optional_test=' test' if refinement_type == 'test' else ''
),
role='user'
)
messages.append(HumanMessage(content=user_input))
agent_question = self.extract_content_from_result(agent_response_raw, 'prompt.txt', can_contain_code_block=False)
final = self.extract_content_from_result(agent_response_raw, 'final.txt', can_contain_code_block=False)
if final:
setattr(self.microservice_specification, refinement_type, final)
break
elif agent_question:
messages.append(AIMessage(content=agent_question))
user_input = self.get_user_input(pm, agent_question)
else:
if num_parsing_tries > 2:
raise self.TaskRefinementException()
num_parsing_tries += 1
messages.append(AIMessage(content=agent_response_raw))
messages.append(SystemMessage(content='You did not put your answer into the right format using ** and ```.'))
@staticmethod
def get_user_input(employee, prompt_to_user):
val = input(f'{employee.emoji}{prompt_to_user}\nyou: ')
print()
while not val:
val = input('you: ')
return val
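Since --description and --test are optional now, the generator can be driven fully interactively. A minimal sketch of the new entry path (the target path and model choice are hypothetical):

```python
from src.options.generate.generator import Generator

# Passing None for both description and test triggers the PM-style
# requirement-refinement dialogue before any code is generated.
generator = Generator(None, None, 'microservice', model='gpt-3.5-turbo')
generator.generate()  # runs refine_specification() first, then scaffolds the microservice
```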

View File

@@ -0,0 +1 @@
# if this file

src/options/generate/templates_system.py
View File

@@ -16,3 +16,151 @@ and the following test scenario:
You must obey the following rules:
{not_allowed_function_string}
{not_allowed_docker_string}'''
system_task_introduction = f'''
You are a product manager who refines the requirements of a client who wants to create a microservice.
'''
system_task_iteration = '''
The client writes a description of the microservice.
You must only talk to the client about the microservice.
You must not output anything other than what the following steps instruct you to output.
1.
You must create a check list for the requirements of the microservice.
Input and output have to be accurately specified.
You must use the following format, inserting ✅, ❌ or n/a depending on whether the requirement is fulfilled, not fulfilled, or not applicable:
input: <insert one of ✅, ❌ or n/a here>
output: <insert one of ✅, ❌ or n/a here>
api access: <insert one of ✅, ❌ or n/a here>
database access: <insert one of ✅, ❌ or n/a here>
2.
You must do either a or b.
a)
If the description is not sufficiently specified, then ask for the missing information.
Your response must exactly match the following code block format (double asterisks for the file name and triple backticks for the file block):
**prompt.txt**
```text
<prompt to the client here>
```
b)
Otherwise you respond with the summarized description.
The summarized description must contain all the information mentioned by the client.
Your response must exactly match the following code block format (double asterisks for the file name and triple backticks for the file block):
**final.txt**
```text
<task here>
```
The character sequence ``` must always be at the beginning of the line.
You must not add information that was not provided by the client.
Example for the description "given a city, get the weather report for the next 5 days":
input: ✅
output: ✅
api access: ❌
database access: n/a
**prompt.txt**
```text
Please provide the url of the weather api and a valid api key or some other way of accessing the api. Or let our engineers try to find a free api.
```
Example for the description "convert png to svg":
input: ✅
output: ✅
api access: n/a
database access: n/a
**final.txt**
```text
The user inserts a png and gets an svg as response.
```
Example for the description "parser":
input: ❌
output: ❌
api access: n/a
database access: n/a
**prompt.txt**
```text
Please provide the input and output format.
```
'''
system_test_iteration = f'''
The client gives you a description of the microservice (web service).
Your task is to verbally describe a unit test for that microservice.
There are two cases:
a) If the unit test requires an example input file:
In this case you must ask the client to provide the example input file as a URL.
You must not accept files that are not URLs.
Your response must exactly match the following code block format (double asterisks for the file name and triple backticks for the file block):
**prompt.txt**
```text
<prompt to the client here>
```
If you did a, you must not do b.
b) If strings, ints, or bools suffice as input for the unit test:
In this case you must describe the unit test verbally.
Your response must exactly match the following code block format (double asterisks for the file name and triple backticks for the file block):
**final.txt**
```text
<task here>
```
If you did b, you must not do a.
Example 1:
Client:
**client-response.txt**
```
given a city, get the weather report for the next 5 days using OpenWeatherMap with the api key b6907d289e10d714a6e88b30761fae22
```
PM:
**final.txt**
```text
The test takes the city "Berlin" as input and asserts that the weather report for the next 5 days exists in the response.
```
Example 2:
Client:
**client-response.txt**
```
The user inserts a png and gets an svg as response.
```
PM:
**prompt.txt**
```text
Please provide a png example input file as url.
```
Client:
**client-response.txt**
```
https://aquasecurity.github.io/kube-bench/v0.6.5/images/kube-bench-logo-only.png
```
PM:
**final.txt**
```text
The test takes the png https://aquasecurity.github.io/kube-bench/v0.6.5/images/kube-bench-logo-only.png as input and asserts the output is an svg.
```
Example 3:
Client:
**client-response.txt**
```
The microservice takes nothing as input and returns the current time.
```
PM:
**final.txt**
```text
The test takes nothing as input and asserts that the output is a string.
```
'''

src/options/generate/templates_user.py
View File

@@ -22,8 +22,10 @@ The implemented function and the test must not use a pre-trained model unless it
The implemented function and the test must not train a model.
The implemented function and the test must not contain prototype or placeholder implementations.
The implemented function and the test must run in a docker container based on debian.
The implemented function and the test must use gpt_3_5_turbo_api if the task requires understanding or generating natural language or using any language model. Other language models are not allowed.'''
The implemented function and the test must not use libraries like Flask.
The implemented function and the test must not have a __main__ function.
The implemented function and the test must use gpt_3_5_turbo_api if the task requires understanding or generation of natural language or using any language model. Other language models are not allowed.
The implemented function and the test must not use gpt_3_5_turbo_api or any other language model if the task does not require understanding or generation of natural language.'''
template_generate_microservice_name = PromptTemplate.from_template(
@@ -87,7 +89,7 @@ template_code_wrapping_string = '''The code will go into {file_name_purpose}. Ma
You must provide the complete file with the exact same syntax to wrap the code.'''
gpt_35_turbo_usage_string = """If you use gpt_3_5_turbo_api, then this is an example on how to use it:
gpt_35_turbo_usage_string = """If need to use gpt_3_5_turbo_api, then this is an example on how to use it:
```
from .apis import GPT_3_5_Turbo_API
@@ -151,8 +153,7 @@ template_generate_requirements = PromptTemplate.from_template(
{code_files_wrapped}
Write the content of the requirements.txt file.
The requirements.txt file must include the following packages:
**requirements.txt**
The requirements.txt file must include the following packages in the specified versions:
```
jina==3.15.1.dev14
docarray==0.21.0
@@ -347,3 +348,29 @@ The playground (app.py) must always use the host on http://localhost:8080 and mu
The playground (app.py) must not import the executor.
'''
)
# Create a wrapper around google called Joogle. It modifies the page summary preview text of the search results to insert the word Jina as much as possible.
template_refinement = PromptTemplate.from_template(
'''
1. Quickly go through the checklist (input/output well defined? api or db access needed?) and decide whether to ask the client something or to write the final description.
**client-response.txt**
```text
{user_input}
```
2. Write either the prompt.txt or the final.txt file.
Either ask for clarification like this:
**prompt.txt**
```text
<prompt to the client here (must be only one question)>
```
Or write the summarized microservice{_optional_test} description like this:
**final.txt**
```text
<microservice{_optional_test} description here>
```
Note that your response must be either prompt.txt or final.txt. You must not write both.
Note that you must obey the double-asterisk and triple-backtick syntax from above.
Note that prompt.txt must contain only one question.
'''
)
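A brief usage sketch of the new template (the user input here is hypothetical; _optional_test switches the wording between task and test refinement):

```python
from src.options.generate.templates_user import template_refinement

prompt = template_refinement.format(
    user_input='convert png to svg',
    _optional_test='',  # pass ' test' when refining the test scenario
)
print(prompt)
```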

src/options/generate/ui.py
View File

@@ -0,0 +1,79 @@
import random
from dataclasses import dataclass
product_manager_names = [
('Leon', 'm'),
('Saahil', 'm'),
('Susana', 'f')
]
engineer_names = [
('Aaron', 'm'),
('Alaeddine', 'm'),
('Andrei', 'm'),
('Anne', 'f'),
('Bo', 'm'),
('Charlotte', 'f'),
('David', 'm'),
('Deepankar', 'm'),
('Delgermurun', 'm'),
('Edward', 'm'),
('Felix', 'm'),
('Florian', 'm'),
('Georgios', 'm'),
('Girish', 'm'),
('Guillaume', 'm'),
('Isabelle', 'f'),
('Jackmin', 'm'),
('Jie', 'm'),
('Joan', 'm'),
('Johannes', 'm'),
('Joschka', 'm'),
('Lechun', 'm'),
('Louis', 'm'),
('Mark', 'm'),
('Maximilian', 'm'),
('Michael', 'm'),
('Mohamed Aziz', 'm'),
('Mohammad Kalim', 'm'),
('Nikos', 'm'),
('Ran', 'm'),
('Saba', 'f'),
('Sami', 'm'),
('Sha', 'm'),
('Subba Reddy', 'm'),
('Tanguy', 'm'),
('Winston', 'm'),
('Yadh', 'm'),
('Yanlong', 'm'),
('Zac', 'm'),
('Zhaofeng', 'm'),
('Zihao', 'm'),
('Ziniu', 'm')
]
role_to_gender_to_emoji = {
'engineer':{
'm': '👨‍💻',
'f': '👩‍💻'
},
'pm': {
'm': '👨‍💼',
'f': '👩‍💼'
},
'qa_engineer': {
'm': '👨‍🔧',
'f': '👩‍🔧',
},
}
@dataclass
class Employee:
role: str
name: str
gender: str
emoji: str
def get_random_employee(role: str) -> Employee:
names = engineer_names if role == 'engineer' else product_manager_names
name, gender = random.choice(names)
emoji = role_to_gender_to_emoji[role][gender]
return Employee(role, name, gender, emoji)
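A quick usage sketch of the helper (output varies because the employee is chosen at random):

```python
from src.options.generate.ui import get_random_employee

pm = get_random_employee('pm')
print(f'{pm.emoji} Hi, I\'m {pm.name}, a PM.')  # e.g. 👩‍💼 Hi, I'm Susana, a PM.
```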

3
test/.test_durations Normal file
View File

@@ -0,0 +1,3 @@
{
"test/test_generator.py::test_generator": 246.233993379
}

test/test_generator.py
View File

@@ -1,29 +1,7 @@
import unittest.mock as mock
import os
from src.options.generate.generator import Generator
from src.apis.gpt import GPTSession
def test_generator(tmpdir):
# Define a mock response
mock_response = {
"choices": [
{
"delta": {
"content": "This is a mock response."
}
}
]
}
# Define a function to replace openai.ChatCompletion.create
def mock_create(*args, **kwargs):
return [mock_response] * kwargs.get("stream", 1)
# Define a function to replace get_openai_api_key
def mock_get_openai_api_key(*args, **kwargs):
pass
# Use mock.patch as a context manager to replace the original methods with the mocks
with mock.patch("openai.ChatCompletion.create", side_effect=mock_create), \
mock.patch.object(GPTSession, "configure_openai_api_key", side_effect=mock_get_openai_api_key):
generator = Generator("my description", "my test")
generator.generate(str(tmpdir))
os.environ['VERBOSE'] = 'true'
generator = Generator("The microservice is very simple, it does not take anything as input and only outputs the word 'test'", "my test", str(tmpdir) + 'microservice', 'gpt-3.5-turbo')
generator.generate()