refactor: cleanup

@@ -40,12 +40,12 @@ graph TB
 - Creates a Streamlit playground where you can test the microservice.
 6. If it fails 10 times in a row, it moves on to the next approach.
 
-[//]: # ([](https://user-images.githubusercontent.com/11627845/226220484-17810f7c-b184-4a03-9af2-3a977fbb014b.mov))
+
 
 
 # 🤏 limitations for now
 - stateless microservices only
 - deterministic microservices only to make sure input and output pairs can be used
 
 # 🔮 vision
 Use natural language interface to create, deploy and update your microservice infrastructure.
 
 
-
+[//]: # ([](https://user-images.githubusercontent.com/11627845/226220484-17810f7c-b184-4a03-9af2-3a977fbb014b.mov))

main.py (34 lines changed)
@@ -1,5 +1,7 @@
 import random
 
+import click
+
 from src import gpt, jina_cloud
 from src.jina_cloud import push_executor, process_error_message
 from src.prompt_tasks import general_guidelines, executor_file_task, chain_of_thought_creation, test_executor_file_task, \
@@ -63,11 +65,12 @@ def wrap_content_in_code_block(executor_content, file_name, tag):
 def create_executor(
         executor_description,
         test_scenario,
+        output_path,
         executor_name,
         package,
         is_chain_of_thought=False,
 ):
-    EXECUTOR_FOLDER_v1 = get_executor_path(package, 1)
+    EXECUTOR_FOLDER_v1 = get_executor_path(output_path, package, 1)
     recreate_folder(EXECUTOR_FOLDER_v1)
     recreate_folder('flow')
 
@@ -163,16 +166,16 @@ print(response[0].text) # can also be blob in case of image/audio..., this shoul
     playground_content = extract_content_from_result(playground_content_raw, 'app.py')
     persist_file(playground_content, f'{executor_path}/app.py')
 
-def get_executor_path(package, version):
+def get_executor_path(output_path, package, version):
     package_path = '_'.join(package)
-    return f'executor/{package_path}/v{version}'
+    return f'{output_path}/{package_path}/v{version}'
 
-def debug_executor(package, executor_description, test_scenario):
+def debug_executor(output_path, package, executor_description, test_scenario):
     MAX_DEBUGGING_ITERATIONS = 10
     error_before = ''
     for i in range(1, MAX_DEBUGGING_ITERATIONS):
-        previous_executor_path = get_executor_path(package, i)
-        next_executor_path = get_executor_path(package, i + 1)
+        previous_executor_path = get_executor_path(output_path, package, i)
+        next_executor_path = get_executor_path(output_path, package, i + 1)
         log_hubble = push_executor(previous_executor_path)
         error = process_error_message(log_hubble)
         if error:
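
For reference, the reworked get_executor_path only swaps the hard-coded executor/ prefix for the caller-supplied root, so each debugging iteration still gets its own versioned folder. A quick illustration with hypothetical argument values:

    # hypothetical values, showing the folder layout this change produces
    get_executor_path('results', ['pdf', 'parser'], 1)  # -> 'results/pdf_parser/v1'
    get_executor_path('results', ['pdf', 'parser'], 2)  # -> 'results/pdf_parser/v2' (next iteration)
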
@@ -216,7 +219,7 @@ def debug_executor(package, executor_description, test_scenario):
             break
         if i == MAX_DEBUGGING_ITERATIONS - 1:
             raise MaxDebugTimeReachedException('Could not debug the executor.')
-    return get_executor_path(package, i)
+    return get_executor_path(output_path, package, i)
 
 class MaxDebugTimeReachedException(BaseException):
     pass
@@ -243,22 +246,27 @@ PDFParserExecutor
     name = extract_content_from_result(name_raw, 'name.txt')
     return name
 
+@click.command()
+@click.option('--executor-description', required=True, help='Description of the executor.')
+@click.option('--test-scenario', required=True, help='Test scenario for the executor.')
+@click.option('--num_approaches', default=3, type=int, help='Number of num_approaches to use to fulfill the task (default: 3).')
+@click.option('--output_path', default='executor', help='Path to the output folder (must be empty). ')
 def main(
         executor_description,
         test_scenario,
-        threads=3,
+        num_approaches=3,
+        output_path='executor',
 ):
     generated_name = generate_executor_name(executor_description)
     executor_name = f'{generated_name}{random.randint(0, 1000_000)}'
 
-    packages = get_possible_packages(executor_description, threads)
-    recreate_folder('executor')
+    packages = get_possible_packages(executor_description, num_approaches)
+    recreate_folder(output_path)
     for package in packages:
         try:
-            create_executor(executor_description, test_scenario, executor_name, package)
+            create_executor(executor_description, test_scenario, output_path, executor_name, package)
             # executor_name = 'MicroChainExecutor790050'
-            executor_path = debug_executor(package, executor_description, test_scenario)
+            executor_path = debug_executor(output_path, package, executor_description, test_scenario)
             # print('Executor can be built locally, now we will push it to the cloud.')
             # jina_cloud.push_executor(executor_path)
             print('Deploy a jina flow')
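
With the new click decorators, main becomes a self-describing CLI. A minimal invocation sketch, assuming main.py is importable from the repo root; the description and scenario strings are made up:

    from click.testing import CliRunner
    from main import main

    runner = CliRunner()
    result = runner.invoke(main, [
        '--executor-description', 'Extracts plain text from PDF files',
        '--test-scenario', 'given a PDF, the returned text must be non-empty',
        '--num_approaches', '2',          # try 2 candidate package sets instead of the default 3
        '--output_path', 'my_executors',  # must point at an empty folder
    ])
    print(result.output)

Click maps --executor-description to the executor_description parameter (and likewise for the other options), so the decorators line up with the function signature above.
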

@@ -1,6 +1,2 @@
 jina==3.14.1
-pyrender~=0.1.45
-trimesh~=3.10.0
-numpy~=1.22.3
-Pillow~=9.0.1
-requests~=2.27.1
+click==8.1.3

src/gpt.py (41 lines changed)
@@ -1,6 +1,6 @@
 import os
 from time import sleep
-from typing import Union, List, Tuple
+from typing import List, Tuple
 
 import openai
 from openai.error import RateLimitError, Timeout
@@ -9,13 +9,39 @@ from src.prompt_system import system_base_definition
 from src.utils.io import timeout_generator_wrapper, GenerationTimeoutError
 from src.utils.string_tools import print_colored
 
+PRICING_GPT4_PROMPT = 0.03
+PRICING_GPT4_GENERATION = 0.06
+PRICING_GPT3_5_TURBO_PROMPT = 0.002
+PRICING_GPT3_5_TURBO_GENERATION = 0.002
+
+if 'OPENAI_API_KEY' not in os.environ:
+    raise Exception('You need to set OPENAI_API_KEY in your environment')
 openai.api_key = os.environ['OPENAI_API_KEY']
 
+
+try:
+    openai.ChatCompletion.create(
+        model="gpt-4",
+        messages=[{
+            "role": 'system',
+            "content": 'test'
+        }]
+    )
+    supported_model = 'gpt-4'
+    pricing_prompt = PRICING_GPT4_PROMPT
+    pricing_generation = PRICING_GPT4_GENERATION
+except openai.error.InvalidRequestError:
+    supported_model = 'gpt-3.5-turbo'
+    pricing_prompt = PRICING_GPT3_5_TURBO_PROMPT
+    pricing_generation = PRICING_GPT3_5_TURBO_GENERATION
+
 total_chars_prompt = 0
 total_chars_generation = 0
 
 
 class Conversation:
-    def __init__(self, prompt_list: List[Tuple[str, str]] = None):
+    def __init__(self, prompt_list: List[Tuple[str, str]] = None, model=supported_model):
+        self.model = model
         if prompt_list is None:
             prompt_list = [('system', system_base_definition)]
         self.prompt_list = prompt_list
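
Since model is now a constructor argument defaulting to the probed supported_model, callers can still pin a model explicitly. A small usage sketch; the prompt text is invented:

    conv = Conversation()                        # uses the probed default ('gpt-4' when the test call succeeds)
    cheap = Conversation(model='gpt-3.5-turbo')  # force the cheaper model regardless of the probe
    reply = cheap.query('Summarize what a Jina Executor is in one sentence.')
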
@@ -24,19 +50,18 @@ class Conversation:
     def query(self, prompt: str):
         print_colored('user', prompt, 'blue')
         self.prompt_list.append(('user', prompt))
-        response = get_response(self.prompt_list)
+        response = self.get_response(self.prompt_list)
         self.prompt_list.append(('assistant', response))
         return response
 
-
-def get_response(prompt_list: List[Tuple[str, str]]):
+    def get_response(self, prompt_list: List[Tuple[str, str]]):
         global total_chars_prompt, total_chars_generation
         for i in range(10):
             try:
                 response_generator = openai.ChatCompletion.create(
                     temperature=0,
                     max_tokens=2_000,
-                    model="gpt-4",
+                    model=self.model,
                     stream=True,
                     messages=[
                         {
@@ -57,8 +82,8 @@ def get_response(prompt_list: List[Tuple[str, str]]):
                 complete_string += content
                 total_chars_generation += len(content)
         print('\n')
-        money_prompt = round(total_chars_prompt / 3.4 * 0.03 / 1000, 2)
-        money_generation = round(total_chars_generation / 3.4 * 0.06 / 1000, 2)
+        money_prompt = round(total_chars_prompt / 3.4 * pricing_prompt / 1000, 2)
+        money_generation = round(total_chars_generation / 3.4 * pricing_generation / 1000, 2)
         print('money prompt:', f'${money_prompt}')
         print('money generation:', f'${money_generation}')
         print('total money:', f'${money_prompt + money_generation}')
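
The estimate treats roughly 3.4 characters as one token and applies the per-1,000-token price, so the change simply parameterizes the rate. A worked example with hypothetical character counts:

    # 17,000 prompt chars / 3.4 ≈ 5,000 tokens
    round(17_000 / 3.4 * 0.03 / 1000, 2)   # gpt-4 prompt pricing   -> 0.15 (dollars)
    round(17_000 / 3.4 * 0.002 / 1000, 2)  # gpt-3.5-turbo pricing  -> 0.01
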

@@ -7,7 +7,6 @@ from typing import Optional, Dict
 from starlette.middleware.cors import CORSMiddleware
 from starlette.requests import Request
 from starlette.responses import JSONResponse
-Flow.plot()
 from main import main
 
 app = FastAPI()

@@ -1,5 +0,0 @@
|
|||||||
# it mocked the executor function to fix the test
|
|
||||||
executor.classify_website = mock_classify_website
|
|
||||||
|
|
||||||
# it attached a fake screen to the test execution
|
|
||||||
RUN xvfb-run -s "-screen 0 640x480x24" python test_executor.py
|
|
||||||