# Gemma3n / app.py
# Provenance: ReactLover's Hugging Face Space, "Update app.py",
# commit 7c4ca02 (verified), 1.79 kB.
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse, HTMLResponse
from PIL import Image
import io, os, traceback

# BUG FIX: these cache variables must be set BEFORE importing transformers.
# The library resolves HF_HOME / TRANSFORMERS_CACHE / HF_HUB_CACHE into its
# default cache paths at import time, so setting them after
# `from transformers import pipeline` (as the original code did) had no effect.
os.environ["HF_HOME"] = "/app/cache"
os.environ["TRANSFORMERS_CACHE"] = "/app/cache"
os.environ["HF_HUB_CACHE"] = "/app/cache/hub"

from transformers import pipeline  # deliberately imported after cache config

app = FastAPI()

# Load SmolVLM once at startup via the pipeline API (shared by all requests).
pipe = pipeline(
    "image-to-text",
    model="HuggingFaceTB/SmolVLM-256M-Instruct",
    device=-1,  # -1 = CPU; free-tier hardware has no GPU
)
@app.get("/")
def home():
    """Health-check endpoint pointing callers at the useful routes."""
    msg = (
        "API is running. Use POST /predict with an image, "
        "or visit /upload to test in browser."
    )
    return {"message": msg}
@app.get("/upload", response_class=HTMLResponse)
def upload_form():
    """Serve a minimal HTML form for manually exercising POST /predict."""
    page = """
    <html>
    <body>
    <h2>Upload an ID Image</h2>
    <form action="/predict" enctype="multipart/form-data" method="post">
    <input name="file" type="file">
    <input type="submit" value="Upload">
    </form>
    </body>
    </html>
    """
    return page
@app.post("/predict")
async def predict_gender(file: UploadFile = File(...)):
    """Ask the vision-language model whether the person on an uploaded ID
    image is male or female.

    Returns ``{"gender": <model text>}`` on success, or a JSON error payload
    with HTTP 500 if decoding or inference fails.
    """
    try:
        # Read and decode the uploaded image; convert to RGB so any input
        # mode (RGBA, palette, grayscale) is accepted by the model.
        image_bytes = await file.read()
        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")

        # Instruction for the model.
        prompt = "Is the person on this ID male or female?"

        # BUG FIX: `prompt` is an ImageToTextPipeline call argument, not a
        # generate() kwarg. generate() rejects unknown keyword arguments, so
        # the original generate_kwargs={"prompt": ...} would fail (or at best
        # never deliver the instruction to the model).
        result = pipe(
            image,
            prompt=prompt,
            generate_kwargs={"max_new_tokens": 32},
        )

        # Some image-to-text models echo the prompt before the answer;
        # strip it so only the model's reply is returned.
        answer = result[0]["generated_text"].strip()
        if answer.startswith(prompt):
            answer = answer[len(prompt):].strip()

        return JSONResponse({"gender": answer})
    except Exception as e:
        # Log the full traceback server-side; return a generic 500 payload
        # to the client (best-effort endpoint, never crash the worker).
        traceback.print_exc()
        return JSONResponse({"error": str(e)}, status_code=500)