Mirror of https://github.com/aljazceru/dev-gpt.git, synced 2025-12-25 09:24:23 +01:00
feat: relax and clarify instructions for gpt turbo
@@ -166,10 +166,9 @@ response = client.post('/', inputs=DocumentArray([d])) # always use '/'
 print(response[0].text) # can also be blob in case of image/audio..., this should be visualized in the streamlit app
 '''
 )
-conversation = self.gpt_session.get_conversation()
+conversation = self.gpt_session.get_conversation([])
 conversation.query(user_query)
-playground_content_raw = conversation.query(
-    f"General rules: " + not_allowed() + chain_of_thought_optimization('python', 'app.py'))
+playground_content_raw = conversation.query(chain_of_thought_optimization('python', 'app.py', 'the playground'))
 playground_content = self.extract_content_from_result(playground_content_raw, 'app.py')
 persist_file(playground_content, os.path.join(executor_path, 'app.py'))
 
src/gpt.py (23 changed lines)
@@ -7,7 +7,7 @@ from openai.error import RateLimitError, Timeout
 
 from src.constants import PRICING_GPT4_PROMPT, PRICING_GPT4_GENERATION, PRICING_GPT3_5_TURBO_PROMPT, \
     PRICING_GPT3_5_TURBO_GENERATION
-from src.prompt_system import system_base_definition
+from src.prompt_system import system_base_definition, executor_example, docarray_example, client_example
 from src.utils.io import timeout_generator_wrapper, GenerationTimeoutError
 from src.utils.string_tools import print_colored
@@ -64,18 +64,25 @@ class GPTSession:
         print('total money so far:', f'${money_prompt + money_generation}')
         print('\n')
 
-    def get_conversation(self):
-        return _GPTConversation(self.supported_model, self.cost_callback)
+    def get_conversation(self, system_definition_examples: List[str] = ['executor', 'docarray', 'client']):
+        return _GPTConversation(self.supported_model, self.cost_callback, system_definition_examples)
 
 
 class _GPTConversation:
-    def __init__(self, model: str, cost_callback, prompt_list: List[Tuple[str, str]] = None):
+    def __init__(self, model: str, cost_callback, system_definition_examples: List[str] = ['executor', 'docarray', 'client']):
         self.model = model
-        if prompt_list is None:
-            prompt_list = [('system', system_base_definition)]
+        system_message = system_base_definition
+        if 'executor' in system_definition_examples:
+            system_message += f'\n{executor_example}'
+        if 'docarray' in system_definition_examples:
+            system_message += f'{docarray_example}'
+        if 'client' in system_definition_examples:
+            system_message += f'{client_example}'
+
+        prompt_list = [('system', system_message)]
         self.prompt_list = prompt_list
         self.cost_callback = cost_callback
-        print_colored('system', system_base_definition, 'magenta')
+        print_colored('system', system_message, 'magenta')
 
     def query(self, prompt: str):
         print_colored('user', prompt, 'blue')
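The new `system_definition_examples` parameter controls which example texts get appended to the system prompt. Below is a minimal, self-contained sketch of that assembly logic, using stand-in strings for the real examples from src/prompt_system.py; the helper name `build_system_message` is hypothetical and introduced only for illustration:

```python
from typing import List

# Stand-in strings; the real texts are executor_example, docarray_example and
# client_example defined in src/prompt_system.py.
system_base_definition = "You are a principal engineer working at Jina - an open source company."
executor_example = "<executor example>"
docarray_example = "<docarray example>"
client_example = "<client example>"

def build_system_message(system_definition_examples: List[str]) -> str:
    # Mirrors the __init__ logic in the hunk above: start from the base
    # definition and append only the requested example sections.
    system_message = system_base_definition
    if 'executor' in system_definition_examples:
        system_message += f'\n{executor_example}'
    if 'docarray' in system_definition_examples:
        system_message += f'{docarray_example}'
    if 'client' in system_definition_examples:
        system_message += f'{client_example}'
    return system_message

print(build_system_message(['executor', 'docarray', 'client']))  # full system prompt
print(build_system_message([]))  # base definition only, as requested by get_conversation([])
```

Passing an empty list, as the playground-generation call in the first hunk now does, keeps the system prompt down to the one-sentence base definition. Note also that the mutable list used as a default argument is shared across calls; that is harmless here because the list is only read, never mutated.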
@@ -100,7 +107,7 @@ class _GPTConversation:
         try:
             response_generator = openai.ChatCompletion.create(
                 temperature=0,
-                max_tokens=2_000,
+                max_tokens=2_000 if self.model == 'gpt-4' else None,
                 model=self.model,
                 stream=True,
                 messages=[
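For non-GPT-4 models the diff now passes `max_tokens=None`. If the installed openai client version does not accept an explicit `None` there, an equivalent pattern is to include the key only when it is needed; here is a sketch under that assumption, not the repository's actual code:

```python
import openai

def create_chat_completion(model: str, messages: list):
    # Build the kwargs dict first so max_tokens is only sent for gpt-4,
    # instead of passing max_tokens=None for other models.
    kwargs = dict(
        temperature=0,
        model=model,
        stream=True,
        messages=messages,
    )
    if model == 'gpt-4':
        kwargs['max_tokens'] = 2_000
    return openai.ChatCompletion.create(**kwargs)
```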
@@ -1,7 +1,6 @@
 from src.constants import FLOW_URL_PLACEHOLDER
 
-executor_example = '''
-Using the Jina framework, users can define executors.
+executor_example = '''Using the Jina framework, users can define executors.
 Here is an example of how an executor can be defined. It always starts with a comment:
 
 **executor.py**
@@ -20,11 +19,9 @@ class MyInfoExecutor(Executor):
         return docs
 ```
 
-An executor gets a DocumentArray as input and returns a DocumentArray as output.
-'''
+An executor gets a DocumentArray as input and returns a DocumentArray as output.'''
 
-docarray_example = f'''
-A DocumentArray is a python class that can be seen as a list of Documents.
+docarray_example = f'''A DocumentArray is a python class that can be seen as a list of Documents.
 A Document is a python class that represents a single document.
 Here is the protobuf definition of a Document:
 
@@ -86,12 +83,10 @@ d8 = Document()
 d8.text = json.dumps([{{"id": "1", "text": ["hello", 'test']}}, {{"id": "2", "text": "world"}}])
 # the document has a helper function load_uri_to_blob:
 # For instance, d4.load_uri_to_blob() downloads the file from d4.uri and stores it in d4.blob.
-# If d4.uri was something like 'https://website.web/img.jpg', then d4.blob would be something like b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01...
-'''
+# If d4.uri was something like 'https://website.web/img.jpg', then d4.blob would be something like b'\\xff\\xd8\\xff\\xe0\\x00\\x10JFIF\\x00\\x01\\x01... '''
 
 
-client_example = f'''
-After the executor is deployed, it can be called via Jina Client.
+client_example = f'''After the executor is deployed, it can be called via Jina Client.
 Here is an example of a client file:
 
 **client.py**
@@ -102,13 +97,7 @@ d = Document(uri='...')
 d.load_uri_to_blob()
 response = client.post('/', inputs=DocumentArray([d])) # the client must be called on '/'
 print(response[0].text)
-```
-'''
+```'''
 
 
-system_base_definition = f'''
-You are a principal engineer working at Jina - an open source company."
-{executor_example}
-{docarray_example}
-{client_example}
-'''
+system_base_definition = f'''You are a principal engineer working at Jina - an open source company.'''
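The hunks above appear to come from src/prompt_system.py, judging by the import updated earlier in this commit. Besides dropping the example texts from `system_base_definition`, they mostly move the opening text onto the same line as the triple quotes and fold the closing quotes onto the last content line. A tiny sketch of the whitespace difference this removes:

```python
# The old form carried a leading newline (and a trailing one) into the prompt.
old = '''
Using the Jina framework, users can define executors.
'''
new = '''Using the Jina framework, users can define executors.'''

assert old == '\n' + new + '\n'
```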
@@ -17,9 +17,13 @@ def general_guidelines():
     )
 
 
-def _task(task, tag_name, file_name):
+def _task(task, tag_name, file_name, function_name=None):
+    into_string = file_name
+    if function_name:
+        into_string += f"/{function_name}"
+
     return (
-        task + f"The code will go into {file_name}. Wrap the code into:\n"
+        task + f"The code will go into {into_string}. Make sure to wrap the code into ``` marks even if you only output code:\n"
         f"**{file_name}**\n"
         f"```{tag_name}\n"
         f"...code...\n"
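A simplified, runnable sketch of the updated `_task` helper (from the prompt-task module, whose file name is not shown in this excerpt) makes the produced wrapper text visible; the closing fence line of the template is assumed, since the hunk does not show it:

```python
def _task(task, tag_name, file_name, function_name=None):
    # The file header of the wrapper stays "app.py", while the instruction
    # text now points at "app.py/<function_name>" when a function name is given.
    into_string = file_name
    if function_name:
        into_string += f"/{function_name}"
    return (
        task + f"The code will go into {into_string}. Make sure to wrap the code into ``` marks even if you only output code:\n"
        f"**{file_name}**\n"
        f"```{tag_name}\n"
        f"...code...\n"
        f"```\n"  # assumed closing line, not visible in the hunk above
    )

print(_task("Write the playground. ", "python", "app.py", "the playground"))
```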
@@ -112,17 +116,21 @@ def chain_of_thought_creation():
     )
 
 
-def chain_of_thought_optimization(tag_name, file_name):
+def chain_of_thought_optimization(tag_name, file_name, file_name_function=None):
+    file_name_or_function = file_name
+    if file_name_function:
+        file_name_or_function += f"/{file_name_function}"
     return _task(
-        f'First, write down an extensive list of obvious and non-obvious observations about {file_name} that could need an adjustment. Explain why. '
+        f'First, write down an extensive list of obvious and non-obvious observations about {file_name_or_function} that could need an adjustment. Explain why. '
         f"Think if all the changes are required and finally decide for the changes you want to make, "
         f"but you are not allowed disregard the instructions in the previous message. "
         f"Be very hesitant to change the code. Only make a change if you are sure that it is necessary. "
 
-        f"Output only {file_name} "
-        f"Write the whole content of {file_name} - even if you decided to change only a small thing or even nothing. ",
+        f"Output only {file_name_or_function} "
+        f"Write the whole content of {file_name_or_function} - even if you decided to change only a small thing or even nothing. ",
         tag_name,
-        file_name
+        file_name,
+        file_name_function
     )
 
 def not_allowed():
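To tie this back to the first hunk's call `chain_of_thought_optimization('python', 'app.py', 'the playground')`, here is a hedged, self-contained sketch with `_task` stubbed down to echoing its target string (the real prompt text is much longer):

```python
def _task(task, tag_name, file_name, function_name=None):
    # Stub: only show where the code is said to go.
    into_string = file_name + (f"/{function_name}" if function_name else "")
    return task + f"[code goes into {into_string}]"

def chain_of_thought_optimization(tag_name, file_name, file_name_function=None):
    file_name_or_function = file_name
    if file_name_function:
        file_name_or_function += f"/{file_name_function}"
    return _task(
        f"First, write down observations about {file_name_or_function} that could need an adjustment. "
        f"Output only {file_name_or_function}. ",
        tag_name,
        file_name,
        file_name_function,
    )

print(chain_of_thought_optimization('python', 'app.py', 'the playground'))
# The optimization prompt now talks about "app.py/the playground" rather than just "app.py".
```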