mirror of https://github.com/aljazceru/dev-gpt.git (synced 2025-12-18 14:14:21 +01:00)
➕ refactor: summarize error message without line number
@@ -71,8 +71,14 @@ Note: If you are not sure about the details, then come up with the minimal numbe
 Note: If you can decide to return files as URLs or as base64 encoded strings, then choose the base64 encoded strings.'''

 summarize_description_and_schemas_prompt = '''{context_string}
-Write an updated microservice description by incorporating information about the request and response parameters in a concise way without losing any information.
-Note: You must not mention any details about algorithms or the technical implementation.
-Note: You must not mention that there is a request and response JSON schema
-Note: You must not use any formatting like triple backticks.
-Note: If an external API like google_custom_search or gpt_3_5_turbo is mentioned in the description, then you must mention the API in the updated description as well.'''
+Please write an updated microservice description by incorporating information about the request and response parameters in a concise manner, ensuring all information from the existing description is maintained.
+
+Constraints:
+
+- Do not mention any details about algorithms or the technical implementation.
+- Refrain from indicating there is a request and response JSON schema.
+- Avoid using any special formatting such as triple backticks.
+- If a specific tool or API (e.g. google_custom_search, gpt_3_5_turbo) is referred to in the original description, \
+include it in the updated description using the exact name given. \
+For instance, if the original description mentions 'gpt_3_5_turbo', \
+the updated description should also specify 'gpt_3_5_turbo'.'''
scripts/prompt_improvement.py (new file, +55 lines)
@@ -0,0 +1,55 @@
from dev_gpt.options.generate.chains.auto_refine_description import better_description_prompt

prompt_template = better_description_prompt

d = {
    'microservice_description': 'Input is a tweet that contains passive aggressive language. The output is the positive version of that tweet.'
}

task = '''\
The main issue with GPT-3's output is that it violated the given prompt instructions. It used a non-specific formulation "such as GPT-3" to suggest a tool for natural language processing, which is explicitly prohibited in the prompt.

How can I change the prompt template to make this more clear to GPT-3?'''

generated_output = '''\
Microservice Description:

This microservice takes a tweet as input that contains passive aggressive language and returns the positive version of that tweet as output. The microservice performs the following tasks:

1. Use a natural language processing (NLP) tool such as GPT-3 to analyze the input tweet and identify the passive aggressive language.
2. Generate a list of positive words and phrases that can be used to replace the passive aggressive language in the tweet.
3. Replace the passive aggressive language in the tweet with positive words and phrases.
4. Return the positive version of the tweet as output.'''

value_explanations = [
    f'This is the value of {k}: "{v}"' for k, v in d.items()
]

value_explanations_string = '\n'.join(value_explanations)

fix_template = f'''\
Here is the prompt template I used for GPT-3:
# start of prompt

{prompt_template}

# end of prompt

{value_explanations_string}


Here is the generated output by GPT-3:
# generated output start

{generated_output}

# generated output end


###### your task #####
{task}
'''
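The script stops after assembling fix_template and never sends it to a model. A minimal sketch of how one might actually request the prompt-improvement advice, reusing the repository's own helpers and assuming ask_gpt accepts an already fully rendered prompt (fix_template is an f-string, so no placeholders remain), could look like this; the log-file path is made up:

    from dev_gpt.apis.gpt import GPTSession, ask_gpt
    from dev_gpt.options.generate.parser import identity_parser

    # A GPTSession has to exist before ask_gpt is used, just as the tests below
    # arrange via their init_gpt fixture; 'prompt_improvement_log.json' is hypothetical.
    GPTSession('prompt_improvement_log.json', model='gpt-3.5-turbo')

    # identity_parser returns the raw completion text unchanged.
    suggestion = ask_gpt(fix_template, identity_parser)
    print(suggestion)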
@@ -3,6 +3,8 @@ from typing import List, Generator

 import pytest

+from dev_gpt.apis.gpt import GPTSession
+

 def input_generator(input_sequence: list) -> Generator[str, None, None]:
     """
@@ -34,3 +36,7 @@ def microservice_dir(tmpdir) -> str:
     """
     return os.path.join(str(tmpdir), "microservice")

+@pytest.fixture
+def init_gpt(tmpdir):
+    os.environ['VERBOSE'] = 'true'
+    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
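With this fixture in place, a test opts in simply by naming init_gpt as a parameter; pytest then sets the VERBOSE flag and creates the GPTSession before the test body runs, which is exactly the substitution applied to every test below. Schematically (the test and its input are hypothetical):

    from dev_gpt.options.generate.chains.auto_refine_description import enhance_description

    # Hypothetical test, for illustration only: listing init_gpt as an argument
    # is all that is needed; pytest runs the fixture before the test body.
    def test_uses_gpt(init_gpt):
        better = enhance_description({'microservice_description': 'Summarize a tweet.'})
        assert better  # expect a non-empty refined description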
@@ -48,9 +48,7 @@ def test_extract_content_from_result(plain_text, expected1, expected2):
     assert parsed_result2 == expected2


-def test_self_healing_json_parser(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_self_healing_json_parser(init_gpt):
     json_response = '''\
 ```json
 {
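The truncated json_response value hints at what this test exercises: model output that arrives wrapped in a markdown code fence but still has to parse as JSON. A generic sketch of that kind of lenient parsing, purely illustrative and not the repository's actual parser, is:

    import json
    import re

    def parse_lenient_json(raw: str) -> dict:
        # Strip a leading ```json / ``` fence and a trailing ``` fence, then parse.
        cleaned = re.sub(r'^```(?:json)?\s*|\s*```$', '', raw.strip())
        return json.loads(cleaned)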
@@ -1,15 +1,54 @@
 import os

-from dev_gpt.apis.gpt import GPTSession
-from dev_gpt.options.generate.chains.auto_refine_description import enhance_description
+from dev_gpt.apis.gpt import ask_gpt
+from dev_gpt.options.generate.chains.auto_refine_description import enhance_description, \
+    summarize_description_and_schemas_prompt
+from dev_gpt.options.generate.parser import identity_parser
+from dev_gpt.options.generate.prompt_factory import context_to_string


-def test_better_description(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_better_description(init_gpt):

     better_description = enhance_description({
         'microservice_description': 'Input is a tweet that contains passive aggressive language. The output is the positive version of that tweet.'
     })
     assert 'gpt_3_5_turbo' in better_description
     assert 'such as' not in better_description
+
+def test_update_description_based_on_schema(init_gpt):
+    updated_description = ask_gpt(
+        summarize_description_and_schemas_prompt,
+        identity_parser,
+        context_string=context_to_string({
+            'microservice_description': '''\
+Microservice Description:
+
+Given a tweet that contains passive aggressive language, the microservice will generate a positive version of that tweet. The microservice will perform the following tasks:
+
+1. Use a natural language processing tool such as gpt_3_5_turbo to analyze the sentiment of the input tweet and identify the passive aggressive language.
+2. Generate a positive version of the tweet using gpt_3_5_turbo or a similar text generation tool.
+3. Check the generated tweet for any grammatical errors or typos and correct them if necessary.
+4. Return the positive version of the tweet as output.''',
+            'request_schema': '''\
+{
+    "type": "object",
+    "properties": {
+        "tweet": {
+            "type": "string",
+            "description": "The input tweet that contains passive aggressive language."
+        }
+    },
+    "required": ["tweet"]
+}''',
+            'response_schema': '''\
+{
+    "type": "object",
+    "properties": {
+        "positive_tweet": {
+            "type": "string",
+            "description": "The positive version of the input tweet generated by the microservice."
+        }
+    },
+    "required": ["positive_tweet"]
+}'''
+        })
+    )
+    assert 'gpt_3_5_turbo' in updated_description
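For orientation, concrete request and response bodies that satisfy the two schemas above would look roughly like this (the tweet texts are illustrative, not taken from the repository):

    # Example payloads conforming to the request/response JSON schemas used in the test.
    request_body = {
        "tweet": "Oh sure, take your time. It's not like anyone is waiting."
    }
    response_body = {
        "positive_tweet": "Take all the time you need; we really appreciate the effort!"
    }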
@@ -14,9 +14,7 @@ def test_no_search():
     tool_lines = get_available_tools().split('\n')
     assert len(tool_lines) == 1

-def test_get_used_apis(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis(init_gpt):
     used_apis = PM.get_used_apis('''\
 This microservice listens for incoming requests and generates a fixed output of "test" upon receiving a request. \
 The response sent back to the requester includes the output as a string parameter. \
@@ -24,9 +22,7 @@ No specific request parameters are required, and the response always follows a f
     )
     assert used_apis == []

-def test_get_used_apis_2(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis_2(init_gpt):
     description = '''\
 This microservice accepts a 1-minute WAV audio file of speech, encoded as a base64 binary string, and performs the following tasks:

@@ -39,9 +35,7 @@ The microservice returns the summarized text converted to audio and encoded as a
     used_apis = PM.get_used_apis(description)
     assert used_apis == ['Whisper API', 'gpt_3_5_turbo', 'text-to-speech (TTS) library']

-def test_get_used_apis_3(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis_3(init_gpt):
     description = '''\
 This microservice takes a PDF file as input and returns a summarized text output. \
 It uses PDF parsing and natural language processing tools to generate the summary, \
@@ -51,9 +45,7 @@ and the output parameter is the summarized text.'''
     used_apis = PM.get_used_apis(description)
     assert used_apis == []

-def test_get_used_apis_4(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis_4(init_gpt):
     description = '''\
 This microservice receives a tweet as input \
 and identifies passive aggressive language using natural language processing techniques. \
@@ -4,9 +4,7 @@ from dev_gpt.apis.gpt import GPTSession
 from dev_gpt.options.generate.chains.question_answering import answer_yes_no_question


-def test_answer_yes_no_question(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_answer_yes_no_question(init_gpt):
     assert answer_yes_no_question(
         '''\
 Microservice description:
@@ -20,9 +18,7 @@ The request parameter is "stock_symbol" and the response parameter is "summary".
 ''', 'Based on the microservice description, does the microservice interface with APIs?'
     )

-def test_answer_yes_no_question_2(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_answer_yes_no_question_2(init_gpt):
     assert not answer_yes_no_question(
         '''\
 Microservice description: