mirror of https://github.com/aljazceru/dev-gpt.git, synced 2025-12-21 07:34:20 +01:00

Merge branch 'main' of github.com:jina-ai/microchain into feat_going_meta
.gitignore (vendored, 2 changes)

@@ -1,4 +1,4 @@
-/executor_level2/
+/microservice/
 .env
 config.yml
setup.py (2 changes)

@@ -7,7 +7,7 @@ def read_requirements():
 
 setup(
     name='gptdeploy',
-    version='0.18.18',
+    version='0.18.19',
     description='Use natural language interface to generate, deploy and update your microservice infrastructure.',
     long_description=open('README.md', 'r', encoding='utf-8').read(),
     long_description_content_type='text/markdown',
@@ -1,2 +1,2 @@
-__version__ = '0.18.18'
+__version__ = '0.18.19'
 from src.cli import main
@@ -12,6 +12,8 @@ from langchain.schema import HumanMessage, SystemMessage, BaseMessage
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 from requests.exceptions import ConnectionError
 
+from src.constants import PRICING_GPT4_PROMPT, PRICING_GPT4_GENERATION, PRICING_GPT3_5_TURBO_PROMPT, \
+    PRICING_GPT3_5_TURBO_GENERATION, CHARS_PER_TOKEN
 from src.options.generate.templates_system import template_system_message_base, executor_example, docarray_example, client_example
 from src.utils.string_tools import print_colored
@@ -21,7 +23,23 @@ class GPTSession:
         self.task_description = task_description
         self.test_description = test_description
         self.configure_openai_api_key()
-        self.model_name = 'gpt-4' if model == 'gpt-4' and self.is_gpt4_available() else 'gpt-3.5-turbo'
+        if model == 'gpt-4' and self.is_gpt4_available():
+            self.pricing_prompt = PRICING_GPT4_PROMPT
+            self.pricing_generation = PRICING_GPT4_GENERATION
+        else:
+            if model == 'gpt-4':
+                print_colored('GPT version info', 'GPT-4 is not available. Using GPT-3.5-turbo instead.', 'yellow')
+            model = 'gpt-3.5-turbo'
+            self.pricing_prompt = PRICING_GPT3_5_TURBO_PROMPT
+            self.pricing_generation = PRICING_GPT3_5_TURBO_GENERATION
+        self.model_name = model
+        self.chars_prompt_so_far = 0
+        self.chars_generation_so_far = 0
+
+    def get_conversation(self, system_definition_examples: List[str] = ['executor', 'docarray', 'client']):
+        return _GPTConversation(
+            self.model_name, self.cost_callback, self.task_description, self.test_description, system_definition_examples
+        )
 
     @staticmethod
     def configure_openai_api_key():
@@ -51,11 +69,20 @@ If you have updated it already, please restart your terminal.
                 continue
             return True
         except openai.error.InvalidRequestError:
             print_colored('GPT version info', 'GPT-4 is not available. Using GPT-3.5-turbo instead.', 'yellow')
             return False
 
-    def get_conversation(self, system_definition_examples: List[str] = ['executor', 'docarray', 'client']):
-        return _GPTConversation(self.model_name, self.task_description, self.test_description, system_definition_examples)
+    def cost_callback(self, chars_prompt, chars_generation):
+        self.chars_prompt_so_far += chars_prompt
+        self.chars_generation_so_far += chars_generation
+        print('\n')
+        money_prompt = self._calculate_money_spent(self.chars_prompt_so_far, self.pricing_prompt)
+        money_generation = self._calculate_money_spent(self.chars_generation_so_far, self.pricing_generation)
+        print('Total money spent so far on openai.com:', f'${money_prompt + money_generation}')
+        print('\n')
+
+    @staticmethod
+    def _calculate_money_spent(num_chars, price):
+        return round(num_chars / CHARS_PER_TOKEN * price / 1000, 3)
 
 
 class AssistantStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
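For intuition, the new cost math works out like this (a sketch, not repo code; the constants mirror the PRICING_* and CHARS_PER_TOKEN values this commit adds to the constants module):

```python
# Sketch of the character-to-dollar conversion used by _calculate_money_spent.
# Prices are dollars per 1K tokens; CHARS_PER_TOKEN approximates chars per token.
CHARS_PER_TOKEN = 3.4
PRICING_GPT4_PROMPT = 0.03  # $0.03 per 1K prompt tokens

def calculate_money_spent(num_chars, price):
    # chars -> approximate tokens -> cost at `price` dollars per 1K tokens
    return round(num_chars / CHARS_PER_TOKEN * price / 1000, 3)

# 34,000 prompt chars ~= 10,000 tokens -> 10 * $0.03 = $0.30
assert calculate_money_spent(34_000, PRICING_GPT4_PROMPT) == 0.3
```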
@@ -65,7 +92,7 @@ class AssistantStreamingStdOutCallbackHandler(StreamingStdOutCallbackHandler):
 
 
 class _GPTConversation:
-    def __init__(self, model: str, task_description, test_description, system_definition_examples: List[str] = ['executor', 'docarray', 'client']):
+    def __init__(self, model: str, cost_callback, task_description, test_description, system_definition_examples: List[str] = ['executor', 'docarray', 'client']):
         self._chat = ChatOpenAI(
             model_name=model,
             streaming=True,
@@ -73,6 +100,7 @@ class _GPTConversation:
             verbose=True,
             temperature=0,
         )
+        self.cost_callback = cost_callback
         self.messages: List[BaseMessage] = []
         self.system_message = self._create_system_message(task_description, test_description, system_definition_examples)
         if os.environ['VERBOSE'].lower() == 'true':
@@ -96,6 +124,7 @@ class _GPTConversation:
 
         if os.environ['VERBOSE'].lower() == 'true':
             print()
+        self.cost_callback(sum([len(m.content) for m in self.messages]), len(response.content))
         self.messages.append(response)
         return response.content
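The wiring above means GPTSession keeps the running totals while each _GPTConversation reports usage after every turn: prompt chars are the combined length of all messages sent, generation chars the length of the response. A minimal sketch of that flow (simplified stand-ins, not the repo's classes):

```python
# Simplified sketch of the session/conversation cost wiring introduced here.
class Session:
    def __init__(self):
        self.chars_prompt_so_far = 0
        self.chars_generation_so_far = 0

    def cost_callback(self, chars_prompt, chars_generation):
        # accumulate totals across every conversation of this session
        self.chars_prompt_so_far += chars_prompt
        self.chars_generation_so_far += chars_generation

session = Session()
messages = ['system prompt', 'user question']  # stand-ins for BaseMessage.content
response = 'generated answer'
session.cost_callback(sum(len(m) for m in messages), len(response))
print(session.chars_prompt_so_far, session.chars_generation_so_far)  # 26 16
```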
@@ -156,7 +156,7 @@ streamlit run {os.path.join(microservice_path, "app.py")} --server.port 8081 --s
 
 def run_streamlit_app(app_path):
     subprocess.run(['streamlit', 'run', app_path, '--server.address', '0.0.0.0', '--server.port', '8081', '--', '--host',
-                    'grpc://localhost:8080'])
+                    'http://localhost:8080'])
 
 
 def run_locally(executor_name, microservice_version_path):
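Note the bare '--' in the command: streamlit consumes the flags before it and forwards everything after it to the script, which is how the http:// host reaches the playground. A sketch of the receiving side, assuming an argparse-based app.py (the generated playground may parse sys.argv differently):

```python
# Sketch: reading the forwarded --host flag inside app.py.
# Invoked as: streamlit run app.py --server.port 8081 -- --host http://localhost:8080
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--host', default='http://localhost:8080')  # default is an assumption
args = parser.parse_args()
print(f'playground will call the microservice at {args.host}')
```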
@@ -24,6 +24,12 @@ FILE_AND_TAG_PAIRS = [
 
 FLOW_URL_PLACEHOLDER = 'jcloud.jina.ai'
 
+PRICING_GPT4_PROMPT = 0.03
+PRICING_GPT4_GENERATION = 0.06
+PRICING_GPT3_5_TURBO_PROMPT = 0.002
+PRICING_GPT3_5_TURBO_GENERATION = 0.002
+
+CHARS_PER_TOKEN = 3.4
 
 NUM_IMPLEMENTATION_STRATEGIES = 5
 MAX_DEBUGGING_ITERATIONS = 10
@@ -54,7 +54,7 @@ metas:
 
 
     def generate_and_persist_file(self, section_title, template, destination_folder, file_name, **template_kwargs):
-        print_colored('', f'\n############# {section_title} #############', 'blue')
+        print_colored('', f'\n\n############# {section_title} #############', 'blue')
         conversation = self.gpt_session.get_conversation()
         template_kwargs = {k: v for k, v in template_kwargs.items() if k in template.input_variables}
         content_raw = conversation.chat(
@@ -140,7 +140,7 @@ metas:
         print('\nFirst version of the microservice generated. Start iterating on it to make the tests pass...')
 
     def generate_playground(self, microservice_name, microservice_path):
-        print_colored('', '\n############# Playground #############', 'blue')
+        print_colored('', '\n\n############# Playground #############', 'blue')
 
         file_name_to_content = get_all_microservice_files_with_content(microservice_path)
         conversation = self.gpt_session.get_conversation([])
@@ -223,13 +223,14 @@ metas:
         return 'yes' in answer.lower()
 
     def generate_microservice_name(self, description):
+        print_colored('', '\n\n############# What should be the name of the Microservice? #############', 'blue')
         conversation = self.gpt_session.get_conversation()
         name_raw = conversation.chat(template_generate_microservice_name.format(description=description))
         name = self.extract_content_from_result(name_raw, 'name.txt')
         return name
 
     def get_possible_packages(self):
-        print_colored('', '############# What packages to use? #############', 'blue')
+        print_colored('', '\n\n############# What packages to use? #############', 'blue')
         conversation = self.gpt_session.get_conversation()
         packages_raw = conversation.chat(
             template_generate_possible_packages.format(description=self.task_description)
@@ -84,7 +84,7 @@ Here is an example of a client file:
 **client.py**
 ```python
 from jina import Client, Document, DocumentArray
-client = Client(host='{FLOW_URL_PLACEHOLDER}')
+client = Client(host='{FLOW_URL_PLACEHOLDER}', protocol='http')
 d = Document(uri='...')
 d.load_uri_to_blob()
 response = client.post('/', inputs=DocumentArray([d])) # the client must be called on '/'
@@ -305,7 +305,7 @@ print(response[0].text) # can also be blob in case of image/audio..., this shoul
 ```
 Note that the response will always be in response[0].text
 You must provide the complete app.py file with the exact same syntax to wrap the code.
-The playground (app.py) must read the host from sys.argv because it will be started with a custom host: streamlit run app.py -- --host grpc://...
+The playground (app.py) must read the host from sys.argv because it will be started with a custom host: streamlit run app.py -- --host http(s)://...
 The playground (app.py) must not let the user configure the host on the ui.
 '''
 )