# NOTE(review): the following lines appear to be Hugging Face Spaces status
# metadata leaked in by the extraction ("Spaces: Sleeping"); kept as comments
# so the Dockerfile parses. Confirm against the original repository.
# Spaces: Sleeping
# syntax=docker/dockerfile:1
# Base image with PyTorch 1.10.0 + CUDA 11.3 + cuDNN 8 runtime for GPU inference.
FROM pytorch/pytorch:1.10.0-cuda11.3-cudnn8-runtime

# System dependencies (git/wget available for fetching assets); clean the apt
# lists in the same layer so they don't bloat the image.
RUN apt-get update && apt-get install -y --no-install-recommends \
        git \
        wget \
    && rm -rf /var/lib/apt/lists/*

# Python packages for model loading and serving.
# NOTE: torch/torchvision deliberately NOT re-installed here — the base image
# already ships a CUDA-11.3-matched build, and an unpinned `pip install torch`
# could replace it with an incompatible wheel and silently break GPU support.
RUN pip install --no-cache-dir \
        Flask \
        Pillow \
        requests \
        transformers

# Hugging Face cache in a guaranteed-writable directory (HF_HOME covers newer
# transformers versions that deprecate TRANSFORMERS_CACHE).
ENV TRANSFORMERS_CACHE=/tmp/cache \
    HF_HOME=/tmp/cache
RUN mkdir -p /tmp/cache /models/sapiens_pose /models/motionbert

# Download models at build time. A BuildKit heredoc replaces the previous
# `echo "...\n\..." > script.py` construction, whose output depended on the
# shell's backslash-escape handling of `echo` and was easy to corrupt.
RUN python - <<'EOF'
import requests

# Sapiens pose TorchScript checkpoint — streamed to disk because the file is
# multiple GB and response.content would buffer it all in memory.
url = 'https://huggingface.co/facebook/sapiens-pose-1b-torchscript/resolve/main/model.pt'
response = requests.get(url, stream=True, timeout=600)
if response.status_code == 200:
    with open('/models/sapiens_pose/model.pt', 'wb') as f:
        for chunk in response.iter_content(chunk_size=1 << 20):
            f.write(chunk)
else:
    raise Exception(f'Failed to download model: {response.status_code}')

# MotionBERT weights + tokenizer, saved to the path app.py loads from.
from transformers import AutoModel, AutoTokenizer
AutoModel.from_pretrained('walterzhu/MotionBERT').save_pretrained('/models/motionbert')
AutoTokenizer.from_pretrained('walterzhu/MotionBERT').save_pretrained('/models/motionbert')
EOF

# Run the service as a non-root user; /tmp/cache stays writable and the model
# directories only need read access.
RUN useradd --system --uid 10001 --create-home appuser

# Inference application.
WORKDIR /app
COPY --chown=appuser:appuser app.py /app/app.py

USER appuser

# Documentation only (does not publish): port the Flask app listens on.
EXPOSE 7860

# Exec form so the Python process is PID 1 and receives SIGTERM on stop.
CMD ["python", "/app/app.py"]