# Use a stable Python base image
FROM python:3.9-slim
# Set environment variables
ENV PYTHONUNBUFFERED=1 \
    LANG=C.UTF-8 \
    HF_HOME="/app/huggingface_cache" \
    HUGGINGFACE_HUB_CACHE="/app/huggingface_cache"
# Set working directory
WORKDIR /app
# Copy the required files
COPY requirements.txt .
COPY main.py .
# Install dependencies using a virtual environment
RUN python -m venv /app/venv && \
    /app/venv/bin/pip install --no-cache-dir --upgrade pip && \
    /app/venv/bin/pip install --no-cache-dir -r requirements.txt
# Ensure the model cache directory exists
RUN mkdir -p $HF_HOME
# Add Hugging Face token as an environment variable in the container (this will be injected by Hugging Face Spaces' secrets management)
ARG HF_TOKEN
ENV HF_TOKEN=${HF_TOKEN}
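# Redirect the Hugging Face cache to /tmp (overriding the earlier HF_HOME) and make it
# world-writable so the non-root runtime user on Spaces can write to it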
ENV HF_HOME="/tmp/huggingface_cache"
ENV HUGGINGFACE_HUB_CACHE="/tmp/huggingface_cache"
RUN mkdir -p $HF_HOME && chmod -R 777 $HF_HOME
# Pre-download models into the image cache so the app does not fetch them at startup (fall back gracefully if a download fails)
RUN /app/venv/bin/python -c "from transformers import pipeline; \
pipeline('sentiment-analysis', model='tabularisai/multilingual-sentiment-analysis', use_auth_token='$HF_TOKEN')" || echo 'Failed to download model 1'
RUN /app/venv/bin/python -c "from transformers import pipeline; \
pipeline('sentiment-analysis', model='siebert/sentiment-roberta-large-english', use_auth_token='$HF_TOKEN')" || echo 'Failed to download model 2'
# Expose the FastAPI port (7860 is the default app port for Hugging Face Docker Spaces)
EXPOSE 7860
# Run FastAPI server using virtual environment
CMD ["/app/venv/bin/uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"] |