# Lightweight Python base image, suitable for Hugging Face Spaces
FROM python:3.8-slim
# Install system dependencies
RUN apt-get update && apt-get install -y \
git \
wget \
&& rm -rf /var/lib/apt/lists/*
# Install Python packages: PyTorch, Hugging Face Transformers, Flask, Pillow, TensorFlow, and TensorFlow Hub
RUN pip install --no-cache-dir \
torch \
torchvision \
transformers \
requests \
Flask \
Pillow \
huggingface_hub \
tensorflow \
tensorflow_hub
# Set Hugging Face cache to a guaranteed writable directory
ENV TRANSFORMERS_CACHE=/tmp/cache
RUN mkdir -p /tmp/cache
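# (Hugging Face Spaces run the container as a non-root user, so the default
# cache under /root would not be writable; /tmp always is.)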
# Create directories for the models
RUN mkdir -p /models/movenet /models/blip /models/clip
# Write a Python script that downloads the models via tensorflow_hub and transformers
RUN printf "import os\n\
import tensorflow as tf\n\
import tensorflow_hub as hub\n\
\n\
# Download MoveNet from TensorFlow Hub and re-export it as a SavedModel\n\
movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')\n\
movenet_model_path = '/models/movenet/movenet_lightning'\n\
os.makedirs(movenet_model_path, exist_ok=True)\n\
tf.saved_model.save(movenet_model, movenet_model_path)\n\
\n\
# Download the BLIP model and processor from the Hugging Face Hub\n\
from transformers import BlipForConditionalGeneration, BlipProcessor\n\
BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
\n\
# Download the CLIP model and processor from the Hugging Face Hub\n\
from transformers import CLIPModel, CLIPProcessor\n\
CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')\n\
CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')\n" > download_models.py
# Run the script to download models
RUN python download_models.py
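# Optional sanity check (an addition, not in the original file; left commented
# out): the re-exported MoveNet SavedModel should load and run. MoveNet
# Lightning expects a 1x192x192x3 int32 image tensor, and its 'serving_default'
# signature returns an 'output_0' tensor of shape [1, 1, 17, 3]
# (17 keypoints as y, x, score).
# RUN python -c "import numpy as np, tensorflow as tf; \
#     m = tf.saved_model.load('/models/movenet/movenet_lightning'); \
#     out = m.signatures['serving_default'](tf.constant(np.zeros((1, 192, 192, 3), np.int32))); \
#     print(out['output_0'].shape)"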
# Copy the inference script (app.py) into the container
COPY app.py /app/app.py
# Expose the port Hugging Face Spaces routes traffic to (7860; Flask's own default is 5000)
EXPOSE 7860
# Run the Flask app
CMD ["python", "/app/app.py"] |