Update app/llm.py
app/llm.py  +12 −1
CHANGED
@@ -11,6 +11,13 @@ from pydantic import BaseModel
 from fastapi import APIRouter
 from app.users import current_active_user
 
+
+from transformers import AutoModelForCausalLM
+
+model = AutoModelForCausalLM.from_pretrained("bigcode/starcoder")
+model.to_bettertransformer()
+
+
 class GenModel(BaseModel):
     question: str
     system: str = "You are a helpful medical AI chat assistant. Help as much as you can.Also continuously ask for possible symptoms in order to atat a conclusive ailment or sickness and possible solutions.Remember, response in English."
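Note on this hunk: the added lines load bigcode/starcoder at module import time and call to_bettertransformer(), which needs the optimum package installed and whose documented usage reassigns the returned model. The diff never uses the new model object, so the sketch below is only one plausible way it could feed a generation path; the tokenizer and the starcoder_generate helper are assumptions, not part of this commit.

# Sketch only -- not part of the commit. Assumes a matching tokenizer is loaded
# alongside the module-level `model` added above; bigcode/starcoder is large and
# may require Hugging Face authentication to download.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder")  # assumed, not in the diff

def starcoder_generate(prompt: str, max_new_tokens: int = 128) -> str:
    # Tokenize the prompt and generate a continuation with the module-level model.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=max_new_tokens)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)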
@@ -37,6 +44,7 @@ llm_chat = llama_cpp.Llama.from_pretrained(
     n_gpu_layers=0,
     #chat_format="llama-2"
 )
+
 llm_generate = llama_cpp.Llama.from_pretrained(
     repo_id="Qwen/Qwen1.5-0.5B-Chat-GGUF",
     filename="*q4_0.gguf",
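This hunk only inserts a blank line between the two llama_cpp loaders; for context, llm_generate pulls a q4_0-quantized Qwen1.5-0.5B-Chat GGUF from the Hub. A typical llama-cpp-python call against such a handle looks like the sketch below; the messages and sampling parameters are illustrative, not taken from the file.

# Sketch only -- illustrative usage of the llm_generate handle created above.
out = llm_generate.create_chat_completion(
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello"},
    ],
    max_tokens=128,   # illustrative values, not from the commit
    temperature=0.7,
)
print(out["choices"][0]["message"]["content"])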
@@ -74,6 +82,8 @@ def health():
 # Chat Completion API
 @llm_router.post("/chat/", tags=["llm"])
 async def chat(chatm:ChatModel):#, user: schemas.BaseUser = fastapi.Depends(current_active_user)):
+
+    """
     #chatm.system = chatm.system.format("")#user.email)
     try:
         st = time()
@@ -95,7 +105,8 @@ async def chat(chatm:ChatModel):#, user: schemas.BaseUser = fastapi.Depends(curr
             return JSONResponse(
                 status_code=500, content={"message": "Internal Server Error"}
             )
-
+    """
+
 # Chat Completion API
 @llm_router.post("/generate", tags=["llm"])
 async def generate(gen:GenModel):#, user: schemas.BaseUser = fastapi.Depends(current_active_user)):
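Taken together, the last two hunks open a triple-quoted string right after the def line of the /chat/ handler and close it just before the /generate block, so the former body becomes an inert string literal: the route stays registered but now returns None, which FastAPI serializes as null. If the intent is to disable the endpoint, an explicit response makes that visible to clients; the sketch below is one alternative, not what the commit does.

# Sketch only -- an alternative way to disable the endpoint explicitly,
# not the change made in this commit.
from fastapi.responses import JSONResponse

@llm_router.post("/chat/", tags=["llm"])
async def chat(chatm: ChatModel):
    # Signal that the chat completion feature is not currently available.
    return JSONResponse(
        status_code=501,
        content={"message": "Chat endpoint is temporarily disabled"},
    )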