import io

import yaml
import uvicorn
from fastapi import FastAPI, UploadFile
from openai import OpenAI

from src.models.openai_llm import run_query
from src.models.openai_stt import speech_to_text
from src.models.openai_tts import text_to_speech

app = FastAPI()
# read LLM config file
with open("conf/train_llm.yaml", "r") as file_in:
    cfg = yaml.safe_load(file_in)

# read system message
# TODO: download from wandb
with open("data/system_template.txt", "r") as file_in:
    system_message = file_in.read()

# read STT config file
with open("conf/speech_to_text.yaml", "r") as file_in:
    cfg_stt = yaml.safe_load(file_in)

# init OpenAI client (the API key is read from the OPENAI_API_KEY environment variable)
openai_client = OpenAI()
@app.get("/")
def root():
return "welcome"
@app.post("/llm_query/")
def llm_query(llm_query: str):
res = run_query(
query=llm_query,
openai_params=cfg["openai_parameters"],
system_message=system_message,
client=openai_client,
)
return res
@app.post("/stt_query/")
def stt_query(audio_file: UploadFile):
contents = audio_file.file.read()
buffer = io.BytesIO(contents)
buffer.name = "file.mp3"
return speech_to_text(
audio=buffer,
openai_client=openai_client,
configuration=cfg_stt["openai_parameters"],
)
@app.post("/tts_query/")
def tts_query(input_text: str):
output_path = text_to_speech(client=openai_client, input=input_text)
return output_path
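

if __name__ == "__main__":
    # Dev-server entry point so the module can be run directly (uvicorn is
    # already imported above). Host and port are illustrative defaults, not
    # values taken from the project configuration. Example call once running:
    #   curl -X POST "http://localhost:8000/llm_query/?llm_query=hello"
    uvicorn.run(app, host="0.0.0.0", port=8000)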