updated routes
app.py CHANGED

@@ -64,17 +64,13 @@ app.add_middleware(
 )
 """
 llm_router = APIRouter(prefix="/llm")
-@llm_router.get("/")
-def index():
-    return fastapi.responses.RedirectResponse(url="/docs")
 
-
-@llm_router.get("/health")
+@llm_router.get("/health", tags=["llm"])
 def health():
     return {"status": "ok"}
 
 # Chat Completion API
-@llm_router.post("/chat/")
+@llm_router.post("/chat/", tags=["llm"])
 async def chat(chatm:ChatModel):
     try:
         st = time()

@@ -97,7 +93,7 @@ async def chat(chatm:ChatModel):
 )
 
 # Chat Completion API
-@llm_router.post("/generate")
+@llm_router.post("/generate", tags=["llm"])
 async def generate(gen:GenModel):
     gen.system = "You are an helpful medical AI assistant."
     gen.temperature = 0.5
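The tags=["llm"] argument added above only changes how these endpoints are grouped in the auto-generated OpenAPI/Swagger docs; request handling is unchanged. A minimal sketch of the resulting pattern, assuming the router is mounted on the app with include_router (the wiring itself is not shown in this diff):

    from fastapi import APIRouter, FastAPI

    llm_router = APIRouter(prefix="/llm")

    # Routes tagged "llm" appear under a single "llm" section in the docs UI.
    @llm_router.get("/health", tags=["llm"])
    def health():
        return {"status": "ok"}

    app = FastAPI(docs_url="/")     # Swagger UI is served at the root path
    app.include_router(llm_router)  # assumed wiring, not part of this commit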
main.py CHANGED

@@ -53,11 +53,6 @@ def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
     return items
 
 
-@user_router.get("/")
-async def root():
-    return {"message": "Hello World"}
-
-
 app = FastAPI(
     docs_url="/",
     title="OpenGenAI",
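Both removed handlers (index() in app.py and root() in main.py) served the bare "/" path, which the app already dedicates to the interactive docs via docs_url="/", so dropping them removes the overlap. A hedged smoke test for the surviving routes, assuming main.py exposes the FastAPI instance as app and that llm_router is included on it (neither is confirmed by this diff):

    from fastapi.testclient import TestClient
    from main import app  # assumed import path

    client = TestClient(app)

    # The root path now serves the Swagger UI page directly.
    assert client.get("/").status_code == 200

    # The tagged health route still answers under its /llm prefix.
    assert client.get("/llm/health").json() == {"status": "ok"}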