Commit · 54c9e50
Parent(s): 5f629ed

fix: set app_port to 7860 to match Docker CMD

Files changed:
- .huggingface/space.yaml +1 -1
- Dockerfile +3 -3
- main.py +6 -2
.huggingface/space.yaml
@@ -1,2 +1,2 @@
 sdk: docker
-app_port:
+app_port: 7860
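On Docker Spaces, app_port tells the platform which container port to route HTTP traffic to (7860 is the default when it is omitted), so it must match the port the server actually binds. The Dockerfile changes below bring EXPOSE, the health check, and the uvicorn CMD in line with that same port.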
Dockerfile
@@ -22,13 +22,13 @@ RUN mkdir -p /models/clip && \
 
 RUN python3 -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('bert-base-uncased').save_pretrained('/models/bert-tokenizer')"
 RUN python3 -c "from transformers import CLIPProcessor; CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')"
-EXPOSE
+EXPOSE 7860
 
 # Install curl if it's not already installed
 RUN apt-get update && apt-get install -y curl
 
 # Add the health check
-HEALTHCHECK CMD curl --fail http://localhost:
+HEALTHCHECK CMD curl --fail http://localhost:7860/ || exit 1
 
 #CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
-CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "
+CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860", "--log-level", "debug"]
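For local runs outside the container, the same server can be started programmatically instead of through the Dockerfile CMD. A minimal sketch, not part of this commit, assuming the FastAPI instance in main.py is named app and uvicorn is installed; the port is kept at 7860 so it stays consistent with app_port, EXPOSE, and the health check:

# local_run.py (hypothetical helper, not in the repo)
# Mirrors the Dockerfile CMD: serve main:app with uvicorn on 0.0.0.0:7860.
import uvicorn

from main import app  # assumes main.py exports the FastAPI instance as `app`

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="debug")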
main.py
@@ -17,6 +17,10 @@ def startup_event():
 def root():
     return "<h3>✅ Hugging Face Space is alive</h3>"
 
+@app.get("/health")
+def health_check():
+    return {"status": "ok"}
+
 # Example endpoint to trigger model
 @app.get("/caption")
 def caption():
@@ -24,5 +28,5 @@ def caption():
         return {"error": "Model not loaded"}
     return {"result": "dummy caption"}  # Replace with real logic
 
-if __name__ == "__main__":
-    uvicorn.run(app, host="0.0.0.0", port=8000)
+# if __name__ == "__main__":
+#     uvicorn.run(app, host="0.0.0.0", port=8000)
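Before pushing to the Space, the touched routes can be exercised in-process with FastAPI's TestClient. A minimal sketch, not part of the repo, assuming main.py exposes the application object as app (TestClient requires the httpx package):

# test_endpoints.py (hypothetical, for local verification only)
from fastapi.testclient import TestClient

from main import app  # assumes main.py exports the FastAPI instance as `app`

client = TestClient(app)

def test_root_is_alive():
    # "/" is the path the Dockerfile HEALTHCHECK curls on port 7860.
    assert client.get("/").status_code == 200

def test_health_endpoint():
    # New endpoint added in this commit.
    resp = client.get("/health")
    assert resp.status_code == 200
    assert resp.json() == {"status": "ok"}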