Added llm functions

SwiftyOS
2023-09-20 09:57:02 +02:00
parent 8897e47691
commit edcd103958
4 changed files with 87 additions and 29 deletions

View File

@@ -4,6 +4,7 @@ from dotenv import load_dotenv
load_dotenv()
import forge.sdk.forge_log
forge.sdk.forge_log.setup_logger()
@@ -41,6 +42,7 @@ if __name__ == "__main__":
import forge.agent
import forge.sdk.db
from forge.sdk.workspace import LocalWorkspace
print(logo)
database_name = os.getenv("DATABASE_STRING")
workspace = LocalWorkspace(os.getenv("AGENT_WORKSPACE"))
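
For context, these values are read from the process environment (populated via load_dotenv() in the hunk above). A minimal sketch of supplying them in Python, with placeholder values that are not part of this commit:

import os

# Placeholder values: DATABASE_STRING is read into database_name and
# AGENT_WORKSPACE becomes the root directory of the LocalWorkspace.
os.environ.setdefault("DATABASE_STRING", "sqlite:///agent.db")
os.environ.setdefault("AGENT_WORKSPACE", "./workspace")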

View File

@@ -1,13 +1,12 @@
import asyncio
import os
from uuid import uuid4
import pathlib
from uuid import uuid4
from fastapi import APIRouter, FastAPI, UploadFile
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.responses import FileResponse, RedirectResponse
from fastapi.staticfiles import StaticFiles
from hypercorn.asyncio import serve
from hypercorn.config import Config
@@ -58,18 +57,22 @@ class Agent:
app.include_router(router, prefix="/ap/v1")
script_dir = os.path.dirname(os.path.realpath(__file__))
frontend_path = pathlib.Path(os.path.join(script_dir, "../../../../frontend/build/web")).resolve()
frontend_path = pathlib.Path(
os.path.join(script_dir, "../../../../frontend/build/web")
).resolve()
if os.path.exists(frontend_path):
app.mount("/app", StaticFiles(directory=frontend_path), name="app")
@app.get("/", include_in_schema=False)
async def root():
return RedirectResponse(url='/app/index.html', status_code=307)
else:
LOG.warning(f"Frontend not found. {frontend_path} does not exist. The frontend will not be served")
app.add_middleware(AgentMiddleware, agent=self)
app.mount("/app", StaticFiles(directory=frontend_path), name="app")
@app.get("/", include_in_schema=False)
async def root():
return RedirectResponse(url="/app/index.html", status_code=307)
else:
LOG.warning(
f"Frontend not found. {frontend_path} does not exist. The frontend will not be served"
)
app.add_middleware(AgentMiddleware, agent=self)
config.loglevel = "ERROR"
config.bind = [f"0.0.0.0:{port}"]
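
The hunk above only reformats this logic, but the pattern it touches can be shown standalone: mount a prebuilt frontend with StaticFiles when its directory exists, otherwise warn, then serve the ASGI app with Hypercorn. This is a sketch, not the repository's code; the frontend directory and port are placeholders.

import asyncio
import os
import pathlib

from fastapi import FastAPI
from fastapi.responses import RedirectResponse
from fastapi.staticfiles import StaticFiles
from hypercorn.asyncio import serve
from hypercorn.config import Config

app = FastAPI()

# Stand-in for the resolved ../../../../frontend/build/web path used above.
frontend_path = pathlib.Path("frontend/build/web").resolve()

if os.path.exists(frontend_path):
    # Serve the prebuilt frontend under /app and send / to its index page.
    app.mount("/app", StaticFiles(directory=frontend_path), name="app")

    @app.get("/", include_in_schema=False)
    async def root():
        return RedirectResponse(url="/app/index.html", status_code=307)

else:
    print(f"Frontend not found. {frontend_path} does not exist. The frontend will not be served")

# Same Hypercorn wiring as in the hunk above; the port value is a placeholder.
config = Config()
config.loglevel = "ERROR"
config.bind = ["0.0.0.0:8000"]
asyncio.run(serve(app, config))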

View File

@@ -0,0 +1,69 @@
import typing

import openai
from tenacity import retry, stop_after_attempt, wait_random_exponential

from .forge_log import ForgeLogger

LOG = ForgeLogger(__name__)


@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
def chat_completion_request(
    messages, functions=None, function_call=None, model="gpt-3.5-turbo", custom_labels=None
) -> typing.Union[typing.Dict[str, typing.Any], Exception]:
    """Generate a response to a list of messages using OpenAI's API"""
    try:
        kwargs = {
            "model": model,
            "messages": messages,
        }
        if functions:
            kwargs["functions"] = functions
        if function_call:
            kwargs["function_call"] = function_call
        if custom_labels:
            kwargs["headers"] = {}
            for label in custom_labels.keys():
                # This is an example showing adding in the labels as helicone properties
                kwargs["headers"][f"Helicone-Property-{label}"] = custom_labels[label]

        resp = openai.ChatCompletion.create(**kwargs)
        return resp
    except Exception as e:
        LOG.error("Unable to generate ChatCompletion response")
        LOG.error(f"Exception: {e}")
        raise


@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
async def create_embedding_request(
    messages, model="text-embedding-ada-002"
) -> typing.Union[typing.Dict[str, typing.Any], Exception]:
    """Generate an embedding for a list of messages using OpenAI's API"""
    try:
        return await openai.Embedding.acreate(
            input=[f"{m['role']}: {m['content']}" for m in messages],
            engine=model,
        )
    except Exception as e:
        LOG.error("Unable to generate embedding")
        LOG.error(f"Exception: {e}")
        raise


@retry(wait=wait_random_exponential(min=1, max=40), stop=stop_after_attempt(3))
async def transcribe_audio(
    audio_file: str,
) -> typing.Union[typing.Dict[str, typing.Any], Exception]:
    """Transcribe an audio file using OpenAI's API"""
    try:
        return await openai.Audio.transcribe(model="whisper-1", file=audio_file)
    except Exception as e:
        LOG.error("Unable to transcribe audio")
        LOG.error(f"Exception: {e}")
        raise
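
To make the new helpers concrete, here is a minimal usage sketch. It assumes the module is importable as forge.sdk.llm, that OPENAI_API_KEY is set in the environment, and that the model name and custom_labels values are placeholders; none of this is part of the commit itself.

import asyncio

from forge.sdk.llm import chat_completion_request, create_embedding_request


async def answer(question: str) -> str:
    # Standard OpenAI-style chat messages (placeholder prompt text).
    messages = [
        {"role": "system", "content": "You are a helpful agent."},
        {"role": "user", "content": question},
    ]

    # chat_completion_request is synchronous; custom_labels become
    # Helicone-Property-* request headers.
    resp = chat_completion_request(
        messages,
        model="gpt-3.5-turbo",
        custom_labels={"task_id": "demo-task"},
    )
    reply = resp["choices"][0]["message"]["content"]

    # The embedding helper is async and embeds each message as "role: content".
    embeddings = await create_embedding_request(messages)
    print(f"embedded {len(embeddings['data'])} messages")

    return reply


if __name__ == "__main__":
    print(asyncio.run(answer("What does this agent do?")))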

View File

@@ -37,22 +37,6 @@ base_router = APIRouter()
LOG = ForgeLogger(__name__)
@base_router.get("/", tags=["root"])
async def root():
    """
    Root endpoint that returns a welcome message.
    """
    return Response(content="Welcome to the Auto-GPT Forge")


@base_router.get("/heartbeat", tags=["server"])
async def check_server_status():
    """
    Check if the server is running.
    """
    return Response(content="Server is running.", status_code=200)


@base_router.get("/", tags=["root"])
async def root():
    """