Spaces:
Sleeping
Sleeping
Update app.py
Browse files
removes model_name conflict
app.py
CHANGED
@@ -97,9 +97,8 @@ class PredictionRequest(BaseModel):
|
|
97 |
|
98 |
class PredictionResponse(BaseModel):
|
99 |
generated_text: str
|
100 |
-
|
101 |
|
102 |
-
# Python version check
|
103 |
@app.get("/version")
|
104 |
async def version():
|
105 |
return {
|
@@ -107,26 +106,6 @@ async def version():
|
|
107 |
"models_available": list(MODELS.keys())
|
108 |
}
|
109 |
|
110 |
-
# Environment check
|
111 |
-
@app.get("/debug/environment")
|
112 |
-
async def get_environment_info():
|
113 |
-
import torch
|
114 |
-
import transformers
|
115 |
-
import platform
|
116 |
-
|
117 |
-
return {
|
118 |
-
"python_version": platform.python_version(),
|
119 |
-
"torch_version": torch.__version__,
|
120 |
-
"torch_device": str(torch.device("cuda" if torch.cuda.is_available() else "cpu")),
|
121 |
-
"transformers_version": transformers.__version__,
|
122 |
-
"available_devices": {
|
123 |
-
"cuda_available": torch.cuda.is_available(),
|
124 |
-
"cuda_device_count": torch.cuda.device_count() if torch.cuda.is_available() else 0,
|
125 |
-
"mps_available": hasattr(torch.backends, "mps") and torch.backends.mps.is_available()
|
126 |
-
}
|
127 |
-
}
|
128 |
-
|
129 |
-
# Health check
|
130 |
@app.get("/health")
|
131 |
async def health():
|
132 |
# More comprehensive health check
|
@@ -144,7 +123,6 @@ async def health():
|
|
144 |
"error": str(e)
|
145 |
}
|
146 |
|
147 |
-
# Main /predict endpoint
|
148 |
@app.post("/predict", response_model=PredictionResponse)
|
149 |
async def predict(request: PredictionRequest):
|
150 |
try:
|
@@ -201,7 +179,7 @@ async def predict(request: PredictionRequest):
|
|
201 |
|
202 |
return PredictionResponse(
|
203 |
generated_text=result,
|
204 |
-
|
205 |
)
|
206 |
|
207 |
except Exception as e:
|
|
|
97 |
|
98 |
class PredictionResponse(BaseModel):
|
99 |
generated_text: str
|
100 |
+
selected_model: str # Changed from model_used to avoid namespace conflict
|
101 |
|
|
|
102 |
@app.get("/version")
|
103 |
async def version():
|
104 |
return {
|
|
|
106 |
"models_available": list(MODELS.keys())
|
107 |
}
|
108 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
109 |
@app.get("/health")
|
110 |
async def health():
|
111 |
# More comprehensive health check
|
|
|
123 |
"error": str(e)
|
124 |
}
|
125 |
|
|
|
126 |
@app.post("/predict", response_model=PredictionResponse)
|
127 |
async def predict(request: PredictionRequest):
|
128 |
try:
|
|
|
179 |
|
180 |
return PredictionResponse(
|
181 |
generated_text=result,
|
182 |
+
selected_model=request.model
|
183 |
)
|
184 |
|
185 |
except Exception as e:
|