RAG_AI / Dockerfile
# Use an official Python runtime as a parent image
FROM python:3.11-slim
# Set environment variables for Python
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1
# Set environment variables for Hugging Face cache
ENV TRANSFORMERS_CACHE=/app/cache \
    HF_HOME=/app/cache
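# NOTE: recent transformers releases warn that TRANSFORMERS_CACHE is deprecated
# in favor of HF_HOME; both are set so cached models land under /app/cache either way.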
# Set the working directory
WORKDIR /app
# Copy the requirements file into the container at /app
COPY requirements.txt /app/
# Install dependencies
RUN pip install --no-cache-dir -r requirements.txt
# Create writable directories for the model cache, vector store, and uploads
RUN mkdir -p /app/cache /app/VectorDB /app/uploads
# Copy the rest of the application code to /app
COPY . /app/
# Make /app (including the copied code and the directories above) writable for
# the non-root user the container runs as; done after COPY so copied files are covered
RUN chmod -R 777 /app
# Set environment variables for Flask
ENV FLASK_APP=app.py \
    FLASK_ENV=production
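# NOTE: FLASK_ENV is deprecated in Flask 2.3+; it is kept here in case app.py
# reads it at startup, but gunicorn (below) does not use it.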
# Expose the port the app runs on
EXPOSE 7860
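# Hugging Face Spaces' Docker runtime routes traffic to port 7860 by default.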
# Run the application with gunicorn: 1 worker, bound to 0.0.0.0:7860, 120-second timeout
CMD ["gunicorn", "-w", "1", "-b", "0.0.0.0:7860", "--timeout", "120", "app:app"]