mirror of https://github.com/aljazceru/dev-gpt.git
synced 2025-12-24 00:54:19 +01:00
➕ refactor: summarize error message without line number
@@ -8,7 +8,7 @@ from dev_gpt.options.generate.chains.user_confirmation_feedback_loop import user
 from dev_gpt.options.generate.parser import identity_parser, json_parser, self_healing_json_parser
 from dev_gpt.options.generate.pm.task_tree_schema import TaskTree
 from dev_gpt.options.generate.prompt_factory import make_prompt_friendly
-from dev_gpt.options.generate.templates_user import generate_used_tools_prompt
+from dev_gpt.options.generate.templates_user import generate_used_apis_prompt
 from dev_gpt.options.generate.ui import get_random_employee
@@ -58,47 +58,47 @@ Description of the microservice:
         'Request schema': context['request_schema'],
         'Response schema': context['response_schema'],
     },
-    condition_question='Does the request schema provided include a property that represents a file?',
+    conditions = [
+        is_question_true('Does the request schema provided include a property that represents a file?'),
+    ],
     question_gen='Generate a question that requests for an example file url.',
     extension_name='Input Example',
 )
-used_tools = self.get_used_tools(microservice_description)
-microservice_description += self.user_input_extension_if_needed(
-    {
-        'Microservice description': microservice_description,
-    },
-    condition_question=f'''{
-    (f"Other than interacting with {' and '.join(used_tools)}, does the microservice interface with any additional external APIs?")
-    if used_tools else "Based on the microservice description, does the microservice interface with external APIs"
-    }''',
-    question_gen='Generate a question that asks for the endpoint of the API and an example of a request and response when interacting with the API.',
-    extension_name='Example of API usage',
-    post_transformation_fn=translation(from_format='api instruction',
-                                       to_format='python code snippet raw without formatting')
-)
-return microservice_description, test_description
+used_apis_beside_tools = [x for x in self.get_used_apis(microservice_description) if x not in ['google_custom_search', 'gpt_3_5_turbo']]
+for api in used_apis_beside_tools:
+    microservice_description += self.user_input_extension_if_needed(
+        {
+            'Microservice description': microservice_description,
+        },
+        conditions=[
+            lambda:True
+        ],
+        question_gen=f'Generate a question that asks for the endpoint for {api} and an example of a request and response when interacting with the API.',
+        extension_name=f'Instructions for {api}',
+        post_transformation_fn=translation(from_format='api instruction',
+                                           to_format='python code snippet raw without formatting')
+    )
+return microservice_description, test_description

 @staticmethod
-def get_used_tools(microservice_description):
+def get_used_apis(microservice_description):
     return ask_gpt(
-        generate_used_tools_prompt,
+        generate_used_apis_prompt,
         self_healing_json_parser,
         microservice_description=microservice_description
-    )
+    )['mentioned_apis']

 def user_input_extension_if_needed(
         self,
         context,
-        condition_question,
+        conditions,
         question_gen,
         extension_name,
         post_transformation_fn=None
 ):
     user_answer = get_user_input_if_needed(
         context=context,
-        conditions=[
-            is_question_true(condition_question),
-        ],
+        conditions=conditions,
         question_gen_prompt_part=question_gen,
     )
     if user_answer:
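The hunk above changes two things: get_used_tools becomes get_used_apis (returning the mentioned_apis field of the parsed GPT reply), and user_input_extension_if_needed now takes a conditions list of callables instead of a single condition_question. A minimal sketch of that calling pattern, with a stand-in get_user_input_if_needed and made-up API names rather than the dev-gpt implementation:

# Minimal sketch, not the dev-gpt implementation: the refactor swaps a single
# condition_question string for a list of condition callables, so a caller can pass
# an always-true lambda (as the per-API loop above does) or any other check.
def get_user_input_if_needed(context, conditions, question_gen_prompt_part):
    """Ask the user only when every condition callable returns True; context only mirrors the real signature."""
    if not all(condition() for condition in conditions):
        return ''
    # dev-gpt generates the actual question with GPT; this stand-in just shows the prompt part.
    return input(f'{question_gen_prompt_part}\n> ')

# Per-API loop in the new style; the API names below are made up for illustration.
description = 'Calls weather_api and geocoding_api to build its response.'
for api in ['weather_api', 'geocoding_api']:
    answer = get_user_input_if_needed(
        context={'Microservice description': description},
        conditions=[lambda: True],  # mirrors the lambda:True condition in the diff
        question_gen_prompt_part=f'Provide the endpoint for {api} and an example request/response.',
    )
    if answer:
        description += f'\n\nInstructions for {api}:\n{answer}'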
@@ -548,19 +548,26 @@ Note that prompt.json must only contain one question.
 '''
 )

-generate_used_tools_prompt = '''\
+generate_used_apis_prompt = '''\
 Microservice description:
 {microservice_description}

 Question:
-Respond with a list as JSON idicating which of the APIs [google_custom_search, gpt_3_5_turbo] are mentioned in the description.
-Example 1:
-["google_custom_search", "gpt_3_5_turbo"]
-Example 2:
-["google_custom_search"]
-Example 3:
-["gpt_3_5_turbo"]
-Example 4:
-[]
+Respond with a list as JSON indicating which web APIs (e.g. google_custom_search, gpt_3_5_turbo) are mentioned in the description.
+Positive Example 1:
+{{
+"mentioned_apis": ["google_custom_search", "gpt_3_5_turbo"]
+}}
+Positive Example 2:
+{{
+"mentioned_apis": ["google_custom_search"]
+}}
+Positive Example 3:
+{{
+"mentioned_apis": ["gpt_3_5_turbo"]
+}}
+Positive Example 4:
+{{
+"mentioned_apis": []
+}}
 '''
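The replacement prompt asks the model for a JSON object with a mentioned_apis key, which get_used_apis then indexes into after parsing. A rough, tolerant reader for that shape, only to illustrate the expected format (dev-gpt's actual self_healing_json_parser is assumed to be more involved):

import json
import re

def parse_mentioned_apis(model_reply: str) -> list:
    # Grab the first {...} block from the reply, tolerating surrounding prose.
    match = re.search(r'\{.*\}', model_reply, re.DOTALL)
    if not match:
        return []
    try:
        parsed = json.loads(match.group(0))
    except json.JSONDecodeError:
        return []
    return parsed.get('mentioned_apis', []) if isinstance(parsed, dict) else []

print(parse_mentioned_apis('{"mentioned_apis": ["google_custom_search"]}'))  # ['google_custom_search']
print(parse_mentioned_apis('no json here'))                                  # []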
@@ -136,7 +136,8 @@ data = {{
 }}
 response = requests.post(url, headers=headers, data=data)
 assert response.status_code == 200
-print('This is the text from the audio file:', response.text)'''
+print('This is the text from the audio file:', response.text)''',
+'use any library',
 # f'''\
 # import openai
 # audio_file= open("/path/to/file/audio.mp3", "rb")
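The fixture string above sketches a requests.post call against a speech-to-text endpoint. Generalized into a function, under the assumption that the endpoint, auth header, and field name are placeholders rather than the real Whisper API contract:

import requests

def transcribe(audio_path: str, url: str, token: str) -> str:
    # Post an audio file, expect HTTP 200, and read the transcription from the body.
    with open(audio_path, 'rb') as f:
        response = requests.post(
            url,
            headers={'Authorization': f'Bearer {token}'},
            files={'file': f},
        )
    assert response.status_code == 200
    return response.text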
@@ -160,7 +161,7 @@ def test_generation_level_4(microservice_dir, mock_input_sequence):
 f'''Given an audio file (1min wav) of speech,
 1. convert it to text using the Whisper API.
 2. Summarize the text while still maintaining the key facts.
-3. Create an audio file of the summarized text using a tts library.
+3. Create an audio file of the summarized text using via tts.
 4. Return the the audio file as base64 encoded binary.
 ''',
 str(microservice_dir),
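The updated fixture asks the generated microservice to hand back the synthesized audio as base64-encoded binary. As a reference for that last step only, a small sketch (the path is a placeholder, not a repository file):

import base64

def audio_file_to_base64(path: str) -> str:
    # Read the audio file as raw bytes and return a base64 string, as step 4 of the prompt requires.
    with open(path, 'rb') as f:
        return base64.b64encode(f.read()).decode('utf-8')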
@@ -17,9 +17,24 @@ def test_no_search():
 def test_get_used_tools(tmpdir):
     os.environ['VERBOSE'] = 'true'
     GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
-    used_tools = PM.get_used_tools('''\
+    used_tools = PM.get_used_apis('''\
 This microservice listens for incoming requests and generates a fixed output of "test" upon receiving a request. \
 The response sent back to the requester includes the output as a string parameter. \
 No specific request parameters are required, and the response always follows a fixed schema with a single "output" parameter.'''
-    )
+    )
     assert used_tools == []
+
+def test_get_used_tools_2(tmpdir):
+    os.environ['VERBOSE'] = 'true'
+    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+    description = '''\
+This microservice accepts a 1-minute WAV audio file of speech, encoded as a base64 binary string, and performs the following tasks:
+
+1. Converts the audio file to text using the Whisper API.
+2. Summarizes the text while preserving key facts using gpt_3_5_turbo.
+3. Generates an audio file of the summarized text using a text-to-speech (TTS) library.
+4. Encodes the resulting audio file as a base64 binary string.
+
+The microservice returns the summarized text converted to audio and encoded as a base64 binary string.'''
+    used_tools = PM.get_used_apis(description)
+    assert used_tools == ['Whisper API', 'gpt_3_5_turbo', 'text-to-speech (TTS) library']
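For readers extending these tests, the same expectations could also be written as a parametrized test. The sketch below uses a trivial keyword-matching stand-in instead of the GPT-backed PM.get_used_apis and is not code from the repository:

import pytest

def fake_get_used_apis(description: str) -> list:
    # Stand-in for the GPT call: report any known API name that literally appears in the text.
    known = ['Whisper API', 'gpt_3_5_turbo', 'google_custom_search']
    return [api for api in known if api in description]

@pytest.mark.parametrize('description, expected', [
    ('Generates a fixed "test" output for every request.', []),
    ('Transcribes audio with the Whisper API and summarizes with gpt_3_5_turbo.',
     ['Whisper API', 'gpt_3_5_turbo']),
])
def test_fake_get_used_apis(description, expected):
    assert fake_get_used_apis(description) == expected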