# NOTE(review): the six lines below are Hugging Face Spaces page-scrape
# residue (status text, file size, commit hashes, line-number gutter) —
# not part of the Dockerfile. Commented out so `docker build` can parse
# this file; original text preserved verbatim.
# Spaces:
# Sleeping
# Sleeping
# File size: 1,568 Bytes
# 77902b8 65c9437 1cae688 65c9437 71ba2e2 77902b8 6a6e71d 77902b8 69a4674 7686cad 71ba2e2 65c9437 6a6e71d 7686cad 65c9437 6a6e71d 1cae688 6a6e71d 65c9437 6a6e71d 65c9437 77902b8 65c9437 77902b8 65c9437 77902b8 65c9437 |
# 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 |
# syntax=docker/dockerfile:1
# Base image with PyTorch and CUDA for GPU support.
FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime

# System dependencies needed to fetch code and models at build time.
# --no-install-recommends keeps the layer small; the apt list cache is
# removed in the same layer so it never persists in the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Python packages: Transformers for MotionBERT, Flask for serving,
# Pillow/requests for image handling and downloads.
# NOTE: torch ships with this base image and is intentionally NOT
# reinstalled here — a `pip install torch` could replace the CUDA-enabled
# build with a CPU-only wheel.
RUN pip install --no-cache-dir \
        Flask \
        Pillow \
        requests \
        torchvision \
        transformers

# Point the Hugging Face cache at a guaranteed-writable directory.
# HF_HOME is the current variable; TRANSFORMERS_CACHE is kept for older
# transformers releases that still read it.
ENV HF_HOME=/tmp/cache \
    TRANSFORMERS_CACHE=/tmp/cache

# Unprivileged runtime user (uid 1000 is the Hugging Face Spaces
# convention). The cache dir is chowned so the app can write to it.
RUN useradd -m -u 1000 appuser \
    && mkdir -p /tmp/cache \
    && chown appuser:appuser /tmp/cache

# Directories the downloaded model weights are written into.
RUN mkdir -p /models/sapiens_pose /models/motionbert

# Write the model-download script via a BuildKit heredoc. The previous
# `echo "...\n\"` approach was broken: Dockerfile line continuation drops
# the leading indentation of continued lines (invalid Python inside the
# if/with blocks) and relies on the shell's echo interpreting `\n`.
# The quoted 'EOF' delimiter prevents any variable expansion in the body.
COPY <<'EOF' /download_models.py
import requests

# Fetch the TorchScript Sapiens pose model straight from the Hub.
url = 'https://huggingface.co/facebook/sapiens-pose-1b-torchscript/resolve/main/model.pt'
response = requests.get(url)
if response.status_code == 200:
    with open('/models/sapiens_pose/model.pt', 'wb') as f:
        f.write(response.content)
else:
    raise Exception(f'Failed to download model: {response.status_code}')

# Snapshot MotionBERT weights and tokenizer so runtime needs no network.
from transformers import AutoModel, AutoTokenizer
AutoModel.from_pretrained('walterzhu/MotionBERT').save_pretrained('/models/motionbert')
AutoTokenizer.from_pretrained('walterzhu/MotionBERT').save_pretrained('/models/motionbert')
EOF

# Bake the models into the image, then drop the one-shot script in the
# same layer so it does not linger in the final image.
RUN python /download_models.py && rm /download_models.py

# Application code.
WORKDIR /app
COPY app.py /app/app.py

# Drop root privileges for the running service.
USER appuser

# Default Flask / HF Spaces port (documentation only; publish at run time).
EXPOSE 7860

# Exec-form CMD: the Flask process is PID 1 and receives SIGTERM directly.
CMD ["python", "/app/app.py"]
# | (trailing page-scrape artifact, commented out)