Update private_gpt/launcher.py
private_gpt/launcher.py  CHANGED  (+2 -1)
@@ -91,7 +91,7 @@ def create_app(root_injector: Injector) -> FastAPI:
 
     @model_router.post("/switch_model")
     async def switch_model(
-        new_model: str, current_user: dict = Depends(get_current_user)
+        new_model: str, current_user: dict = Depends(get_current_user), app: FastAPI = Depends(get_app)
     ):
         # Check if the user has either "admin" or "user" role
         if "user" not in current_user.get("role", []):
@@ -111,6 +111,7 @@ def create_app(root_injector: Injector) -> FastAPI:
         # Switch the model using the LLMComponent
         llm_component = root_injector.get(LLMComponent)
         llm_component.switch_model(new_model, settings=settings)
+        await app.reload()
 
         # Return a success message
         return {"message": f"Model switched to {new_model}"}
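For context on the new `app: FastAPI = Depends(get_app)` parameter: FastAPI has no built-in `get_app` dependency, so the diff assumes one is defined elsewhere in the project. A minimal sketch of how such a dependency is commonly written, using the application instance that Starlette attaches to every request (the project's actual `get_app` may differ):

# Sketch only: assumes `get_app` simply hands back the running application.
# Starlette/FastAPI expose the app instance on every incoming request.
from fastapi import FastAPI, Request


def get_app(request: Request) -> FastAPI:
    # `request.app` is the FastAPI instance the request was routed through.
    return request.app

Note that `FastAPI` itself does not define a `reload()` coroutine either, so `await app.reload()` relies on a project-specific method being attached to the app object elsewhere in the codebase.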
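With the change applied, the endpoint is invoked as before; since `new_model` is a plain `str` parameter, FastAPI reads it from the query string. A hypothetical client call, assuming a local deployment on port 8001, no prefix on `model_router`, and bearer-token auth for `get_current_user` (all of which depend on the actual configuration):

# Hypothetical usage; host, port, model name, and auth scheme are placeholders.
import httpx

response = httpx.post(
    "http://localhost:8001/switch_model",         # assumes model_router has no prefix
    params={"new_model": "mistral-7b-instruct"},  # str parameter -> query parameter
    headers={"Authorization": "Bearer <token>"},  # whatever get_current_user expects
)
print(response.json())  # e.g. {"message": "Model switched to mistral-7b-instruct"}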