Mirror of https://github.com/aljazceru/dev-gpt.git, synced 2026-01-06 07:14:19 +01:00
➕ refactor: summarize error message without line number
@@ -3,6 +3,8 @@ from typing import List, Generator
 
+import pytest
+
 from dev_gpt.apis.gpt import GPTSession
 
 
 def input_generator(input_sequence: list) -> Generator[str, None, None]:
     """
@@ -34,3 +36,7 @@ def microservice_dir(tmpdir) -> str:
     """
     return os.path.join(str(tmpdir), "microservice")
 
+@pytest.fixture
+def init_gpt(tmpdir):
+    os.environ['VERBOSE'] = 'true'
+    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
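The new `init_gpt` fixture centralizes the `VERBOSE` flag and the `GPTSession` construction that every GPT-backed test previously repeated inline. A minimal sketch of how a test consumes it, assuming it sits in a test module next to this conftest.py (the test name and body below are illustrative, not part of the commit):

```python
# Hypothetical test module, for illustration only.
# pytest resolves `init_gpt` from conftest.py and runs it before the test body,
# so VERBOSE is already set and a GPTSession logging to <tmpdir>/log.json exists.
def test_some_gpt_feature(init_gpt):
    ...  # call GPT-backed helpers here without any per-test session setup
```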
@@ -48,9 +48,7 @@ def test_extract_content_from_result(plain_text, expected1, expected2):
     assert parsed_result2 == expected2
 
 
-def test_self_healing_json_parser(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_self_healing_json_parser(init_gpt):
     json_response = '''\
 ```json
 {
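The test above hands the parser a GPT reply whose JSON payload is wrapped in a markdown fence. The parser itself is not part of this diff; as a rough, hypothetical illustration of what a "self-healing" JSON parse has to cope with (none of these names come from the repository):

```python
import json
import re


def extract_json_block(reply: str) -> dict:
    """Hypothetical helper: recover a JSON object from a fenced or noisy reply."""
    # Prefer the content of a ```json ... ``` fence when one is present.
    fenced = re.search(r"```(?:json)?\s*(.*?)```", reply, re.DOTALL)
    candidate = fenced.group(1) if fenced else reply
    # Cut down to the outermost braces in case extra prose surrounds the object.
    start, end = candidate.find("{"), candidate.rfind("}")
    return json.loads(candidate[start:end + 1])
```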
@@ -1,15 +1,54 @@
 import os
 
-from dev_gpt.apis.gpt import GPTSession
-from dev_gpt.options.generate.chains.auto_refine_description import enhance_description
+from dev_gpt.apis.gpt import ask_gpt
+from dev_gpt.options.generate.chains.auto_refine_description import enhance_description, \
+    summarize_description_and_schemas_prompt
+from dev_gpt.options.generate.parser import identity_parser
+from dev_gpt.options.generate.prompt_factory import context_to_string
 
 
-def test_better_description(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_better_description(init_gpt):
 
     better_description = enhance_description({
         'microservice_description': 'Input is a tweet that contains passive aggressive language. The output is the positive version of that tweet.'
     })
     assert 'gpt_3_5_turbo' in better_description
     assert 'such as' not in better_description
+
+def test_update_description_based_on_schema(init_gpt):
+    updated_description = ask_gpt(
+        summarize_description_and_schemas_prompt,
+        identity_parser,
+        context_string=context_to_string({
+            'microservice_description': '''\
+Microservice Description:
+
+Given a tweet that contains passive aggressive language, the microservice will generate a positive version of that tweet. The microservice will perform the following tasks:
+
+1. Use a natural language processing tool such as gpt_3_5_turbo to analyze the sentiment of the input tweet and identify the passive aggressive language.
+2. Generate a positive version of the tweet using gpt_3_5_turbo or a similar text generation tool.
+3. Check the generated tweet for any grammatical errors or typos and correct them if necessary.
+4. Return the positive version of the tweet as output.''',
+            'request_schema': '''\
+{
+    "type": "object",
+    "properties": {
+        "tweet": {
+            "type": "string",
+            "description": "The input tweet that contains passive aggressive language."
+        }
+    },
+    "required": ["tweet"]
+}''',
+            'response_schema': '''\
+{
+    "type": "object",
+    "properties": {
+        "positive_tweet": {
+            "type": "string",
+            "description": "The positive version of the input tweet generated by the microservice."
+        }
+    },
+    "required": ["positive_tweet"]
+}'''
+        })
+    )
+    assert 'gpt_3_5_turbo' in updated_description
@@ -14,9 +14,7 @@ def test_no_search():
     tool_lines = get_available_tools().split('\n')
     assert len(tool_lines) == 1
 
-def test_get_used_apis(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis(init_gpt):
     used_apis = PM.get_used_apis('''\
 This microservice listens for incoming requests and generates a fixed output of "test" upon receiving a request. \
 The response sent back to the requester includes the output as a string parameter. \
@@ -24,9 +22,7 @@ No specific request parameters are required, and the response always follows a f
     )
     assert used_apis == []
 
-def test_get_used_apis_2(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis_2(init_gpt):
     description = '''\
 This microservice accepts a 1-minute WAV audio file of speech, encoded as a base64 binary string, and performs the following tasks:
 
@@ -39,9 +35,7 @@ The microservice returns the summarized text converted to audio and encoded as a
     used_apis = PM.get_used_apis(description)
     assert used_apis == ['Whisper API', 'gpt_3_5_turbo', 'text-to-speech (TTS) library']
 
-def test_get_used_apis_3(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis_3(init_gpt):
     description = '''\
 This microservice takes a PDF file as input and returns a summarized text output. \
 It uses PDF parsing and natural language processing tools to generate the summary, \
@@ -51,9 +45,7 @@ and the output parameter is the summarized text.'''
     used_apis = PM.get_used_apis(description)
     assert used_apis == []
 
-def test_get_used_apis_4(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_get_used_apis_4(init_gpt):
     description = '''\
 This microservice receives a tweet as input \
 and identifies passive aggressive language using natural language processing techniques. \
@@ -4,9 +4,7 @@ from dev_gpt.apis.gpt import GPTSession
 from dev_gpt.options.generate.chains.question_answering import answer_yes_no_question
 
 
-def test_answer_yes_no_question(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_answer_yes_no_question(init_gpt):
     assert answer_yes_no_question(
         '''\
 Microservice description:
@@ -20,9 +18,7 @@ The request parameter is "stock_symbol" and the response parameter is "summary".
 ''', 'Based on the microservice description, does the microservice interface with APIs?'
     )
 
-def test_answer_yes_no_question_2(tmpdir):
-    os.environ['VERBOSE'] = 'true'
-    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
+def test_answer_yes_no_question_2(init_gpt):
     assert not answer_yes_no_question(
         '''\
 Microservice description:
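Every file in this diff applies the same change: the repeated `VERBOSE`/`GPTSession` setup becomes a request for the shared `init_gpt` fixture. If naming the fixture in every test signature ever feels noisy, pytest's standard `autouse` option is one possible variant; this is only a sketch of that alternative, not something this commit does:

```python
import os

import pytest

from dev_gpt.apis.gpt import GPTSession


# Sketch of an autouse variant: pytest would run this before every test in the
# package, so tests would no longer need to list `init_gpt` as a parameter.
@pytest.fixture(autouse=True)
def init_gpt(tmpdir):
    os.environ['VERBOSE'] = 'true'
    GPTSession(os.path.join(str(tmpdir), 'log.json'), model='gpt-3.5-turbo')
```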