ReactLover committed on
Commit f334e16 · verified · 1 Parent(s): 0b3d9fc

Update app.py

Files changed (1)
  1. app.py +6 -6
app.py CHANGED
@@ -4,18 +4,18 @@ from transformers import pipeline
 from PIL import Image
 import io, os, traceback
 
-# Cache settings
+# Ensure Hugging Face cache directories are writable
 os.environ["HF_HOME"] = "/app/cache"
 os.environ["TRANSFORMERS_CACHE"] = "/app/cache"
 os.environ["HF_HUB_CACHE"] = "/app/cache/hub"
 
 app = FastAPI()
 
-# Load SmolVLM as pure image-to-text
+# Load BLIP (stable image captioning model)
 pipe = pipeline(
     "image-to-text",
-    model="HuggingFaceTB/SmolVLM-256M-Instruct",
-    device=-1  # CPU
+    model="Salesforce/blip-image-captioning-base",
+    device=-1  # CPU (works on free tier)
 )
 
 @app.get("/")
@@ -45,11 +45,11 @@ async def predict_gender(file: UploadFile = File(...)):
     image_bytes = await file.read()
     image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
 
-    # Run the model (image only)
+    # Run BLIP captioning
     result = pipe(image, max_new_tokens=32)
     caption = result[0]["generated_text"].strip()
 
-    # Very simple heuristic: check for male/female in caption
+    # Simple heuristic for gender detection
     gender = "unknown"
     lower_caption = caption.lower()
     if "male" in lower_caption or "man" in lower_caption: