# Dockerfile for a Hugging Face Space: Flask inference server bundling
# MoveNet (TF Hub), BLIP, and CLIP models baked in at build time.
# Base image: slim Python variant keeps the image small for Hugging Face Spaces.
# NOTE(review): Python 3.8 is past end-of-life — consider bumping to 3.10+ once
# the TensorFlow/Torch versions in use are confirmed compatible.
FROM python:3.8-slim
# System dependencies in a single layer: git/wget for fetching assets, plus the
# shared libraries opencv-python needs at import time (libGL, glib, X11 bits).
# update + install are combined so the apt index is never cached stale, and the
# list files are purged in the same layer that created them.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        libgl1-mesa-glx \
        libglib2.0-0 \
        libsm6 \
        libxext6 \
        libxrender-dev \
        wget \
    && rm -rf /var/lib/apt/lists/*
# Python packages: Transformers (BLIP/CLIP), Flask web server, TensorFlow +
# TF Hub (MoveNet), Torch, and OpenCV. --no-cache-dir keeps pip's wheel cache
# out of the image layer. Sorted for diffability.
# NOTE(review): versions are unpinned — pin them (pkg==x.y.z) for reproducible
# builds; an unpinned torch+tensorflow combo is especially prone to drift.
RUN pip install --no-cache-dir \
        Flask \
        Pillow \
        huggingface_hub \
        opencv-python \
        requests \
        tensorflow \
        tensorflow_hub \
        torch \
        torchvision \
        transformers
# Hugging Face cache must live somewhere writable at runtime. HF_HOME is the
# current variable; TRANSFORMERS_CACHE is kept for older transformers releases
# that still read it (it is deprecated upstream).
ENV TRANSFORMERS_CACHE=/tmp/cache \
    HF_HOME=/tmp/cache
# World-writable on purpose: Spaces may run the container under an arbitrary
# UID, and the cache must stay writable regardless of which user runs the app.
RUN mkdir -p /tmp/cache && chmod -R 777 /tmp/cache

# Target directories for the model weights pre-downloaded at build time.
RUN mkdir -p /models/blip /models/clip
# Generate the model-download script with printf, one argument per line.
# Unlike `echo "...\n..."`, printf's newline handling is POSIX-specified and
# does not depend on which shell (dash vs bash) interprets backslash escapes.
RUN printf '%s\n' \
        "import tensorflow_hub as hub" \
        "" \
        "# MoveNet from TF Hub: loaded (and cached) here; the app loads it directly" \
        "movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')" \
        "" \
        "# Download and save the BLIP captioning model + processor" \
        "from transformers import BlipForConditionalGeneration, BlipProcessor" \
        "BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')" \
        "BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')" \
        "" \
        "# Download and save the CLIP model + processor" \
        "from transformers import CLIPModel, CLIPProcessor" \
        "CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" \
        "CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" \
        > download_models.py

# Bake the model weights into the image so container startup is fast and does
# not depend on network access at runtime.
RUN python download_models.py
# Application code (inference server).
COPY app.py /app/app.py

# Run as a non-root user (uid 1000, the Hugging Face Spaces convention).
# Port 7860 is unprivileged, the baked model dirs under /models are readable,
# and all writable caches live under /tmp, so no root access is needed.
RUN useradd -m -u 1000 user
USER user

# Documentation of the service port; Spaces routes traffic to 7860.
EXPOSE 7860

# Exec-form CMD so the Python process is PID 1 and receives SIGTERM directly
# on container stop.
CMD ["python", "/app/app.py"]