Update app/llm.py
Browse files — app/llm.py (+2, −2)
app/llm.py
CHANGED
|
@@ -72,7 +72,7 @@ def health():
|
|
| 72 |
|
| 73 |
# Chat Completion API
|
| 74 |
@llm_router.post("/chat/", tags=["llm"])
|
| 75 |
-
async def chat(chatm:ChatModel, user: User = Depends(current_active_user)):
|
| 76 |
try:
|
| 77 |
st = time()
|
| 78 |
output = llm_chat.create_chat_completion(
|
|
@@ -95,7 +95,7 @@ async def chat(chatm:ChatModel, user: User = Depends(current_active_user)):
|
|
| 95 |
|
| 96 |
# Text Generation API
|
| 97 |
@llm_router.post("/generate", tags=["llm"])
|
| 98 |
-
async def generate(gen:GenModel, user: User = Depends(current_active_user)):
|
| 99 |
gen.system = "You are a helpful medical AI assistant."
|
| 100 |
gen.temperature = 0.5
|
| 101 |
gen.seed = 42
|
|
|
|
| 72 |
|
| 73 |
# Chat Completion API
|
| 74 |
@llm_router.post("/chat/", tags=["llm"])
|
| 75 |
+
async def chat(chatm:ChatModel, user: User = fastapi.Depends(current_active_user)):
|
| 76 |
try:
|
| 77 |
st = time()
|
| 78 |
output = llm_chat.create_chat_completion(
|
|
|
|
| 95 |
|
| 96 |
# Text Generation API
|
| 97 |
@llm_router.post("/generate", tags=["llm"])
|
| 98 |
+
async def generate(gen:GenModel, user: User = fastapi.Depends(current_active_user)):
|
| 99 |
gen.system = "You are a helpful medical AI assistant."
|
| 100 |
gen.temperature = 0.5
|
| 101 |
gen.seed = 42
|