Spaces:
Sleeping
Sleeping
Update private_gpt/launcher.py
Browse files- private_gpt/launcher.py +25 -0
private_gpt/launcher.py
CHANGED
|
@@ -7,6 +7,7 @@ from fastapi.middleware.cors import CORSMiddleware
|
|
| 7 |
from fastapi.openapi.utils import get_openapi
|
| 8 |
from injector import Injector
|
| 9 |
from fastapi import APIRouter
|
|
|
|
| 10 |
|
| 11 |
from private_gpt.paths import docs_path
|
| 12 |
from private_gpt.server.chat.chat_router import chat_router
|
|
@@ -103,6 +104,29 @@ def create_app(root_injector: Injector) -> FastAPI:
|
|
| 103 |
|
| 104 |
return {"message": f"Model switched to {new_model}"}
|
| 105 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
# @model_router.post("/switch_model")
|
| 107 |
# async def switch_model(new_model: str):
|
| 108 |
# # Implement logic to switch the LLM model based on the user's request
|
|
@@ -170,6 +194,7 @@ def create_app(root_injector: Injector) -> FastAPI:
|
|
| 170 |
app.include_router(embeddings_router)
|
| 171 |
app.include_router(health_router)
|
| 172 |
app.include_router(model_router)
|
|
|
|
| 173 |
|
| 174 |
settings = root_injector.get(Settings)
|
| 175 |
if settings.server.cors.enabled:
|
|
|
|
| 7 |
from fastapi.openapi.utils import get_openapi
|
| 8 |
from injector import Injector
|
| 9 |
from fastapi import APIRouter
|
| 10 |
+
from fastapi.responses import JSONResponse
|
| 11 |
|
| 12 |
from private_gpt.paths import docs_path
|
| 13 |
from private_gpt.server.chat.chat_router import chat_router
|
|
|
|
| 104 |
|
| 105 |
return {"message": f"Model switched to {new_model}"}
|
| 106 |
|
| 107 |
# Router for listing available models. The router-level dependency makes
# every route under /v1/models_list require an authenticated caller.
model_list_router = APIRouter(prefix="/v1/models_list", dependencies=[Depends(get_current_user)])


@model_list_router.get("/", response_model=list[dict])
async def model_list(current_user: dict = Depends(get_current_user)):
    """
    Get a list of models with their details.
    """
    # Hardcoded sample catalogue; each entry's "access" names the roles
    # permitted to use that model.
    models_data = [
        {"id": 1, "name": "gpt-3.5-turbo", "access": ["user", "admin"]},
        {"id": 2, "name": "gpt-4", "access": ["admin"]},
        # Add more models as needed
    ]

    # Admins get the unfiltered catalogue.
    # NOTE(review): assumes current_user["role"] is a list of role names —
    # if it is a plain string, `in` does substring matching; confirm upstream.
    if "admin" in current_user.get("role", []):
        return models_data

    # Everyone else only sees models whose access list includes "user".
    accessible_models = [m for m in models_data if "user" in m["access"]]
    return accessible_models
|
| 129 |
+
|
| 130 |
# @model_router.post("/switch_model")
|
| 131 |
# async def switch_model(new_model: str):
|
| 132 |
# # Implement logic to switch the LLM model based on the user's request
|
|
|
|
| 194 |
app.include_router(embeddings_router)
|
| 195 |
app.include_router(health_router)
|
| 196 |
app.include_router(model_router)
|
| 197 |
+
app.include_router(model_list_router)
|
| 198 |
|
| 199 |
settings = root_injector.get(Settings)
|
| 200 |
if settings.server.cors.enabled:
|