JulianPhillips committed (verified) · Commit e9fbc1c · 1 Parent(s): 30e282c

Update Dockerfile

Files changed (1):
  1. Dockerfile +14 -11
Dockerfile CHANGED

@@ -7,7 +7,15 @@ RUN apt-get update && apt-get install -y \
     wget \
     && rm -rf /var/lib/apt/lists/*
 
-# Install Python packages including Hugging Face Transformers, TorchScript, Flask, TensorFlow, and TensorFlow Hub
+# Install system-level dependencies for OpenCV
+RUN apt-get update && apt-get install -y \
+    libglib2.0-0 \
+    libsm6 \
+    libxext6 \
+    libxrender-dev \
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Python packages including Hugging Face Transformers, TorchScript, Flask, TensorFlow, TensorFlow Hub, and OpenCV
 RUN pip install --no-cache-dir \
     torch \
     torchvision \
@@ -17,7 +25,8 @@ RUN pip install --no-cache-dir \
     Pillow \
     huggingface_hub \
     tensorflow \
-    tensorflow_hub
+    tensorflow_hub \
+    opencv-python
 
 # Set Hugging Face cache to a guaranteed writable directory
 ENV TRANSFORMERS_CACHE=/tmp/cache
@@ -28,17 +37,11 @@ RUN mkdir -p /models/blip /models/clip
 
 # Python script to download models using tensorflow_hub and huggingface_hub
 RUN echo "import os\n\
-import tensorflow_hub as hub\n\
-\n\
-# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)\n\
-movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')\n\
-\n\
-# Download BLIP model and tokenizer using huggingface_hub\n\
+import tensorflow_hub as hub\n\n\n# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)\n\
+movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')\n\n\n# Download BLIP model and tokenizer using huggingface_hub\n\
 from transformers import BlipForConditionalGeneration, BlipProcessor\n\
 BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
-BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
-\n\
-# Download CLIP model and processor using huggingface_hub\n\
+BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\n\n# Download CLIP model and processor using huggingface_hub\n\
 from transformers import CLIPModel, CLIPProcessor\n\
 CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')\n\
 CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" > download_models.py
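
The new apt layer pulls in shared libraries that the opencv-python wheel commonly needs at import time on slim Debian-based images. A quick smoke test of the install (an illustrative check, not part of the commit):

import cv2  # raises ImportError at import time if a required system library is missing

print(cv2.__version__)  # prints the wheel version if cv2 and the system libs load cleanly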
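
For reference, the Python that the RUN echo line writes to download_models.py is shown below as a plain script (this assumes the shell's echo expands the \n escapes, which dash, the default /bin/sh on Debian-based images, does without -e):

import os
import tensorflow_hub as hub


# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)
movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')


# Download BLIP model and tokenizer using huggingface_hub
from transformers import BlipForConditionalGeneration, BlipProcessor
BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')
BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')


# Download CLIP model and processor using huggingface_hub
from transformers import CLIPModel, CLIPProcessor
CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')
CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')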
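
Since MoveNet is only fetched into the tensorflow_hub cache at build time rather than saved under /models, the app presumably reloads it with hub.load at runtime. A minimal sketch of that pattern, following the published TF Hub usage for singlepose/lightning (the placeholder frame is an assumption, not app code):

import tensorflow as tf
import tensorflow_hub as hub

# Reload MoveNet; tensorflow_hub serves it from its local cache when present
model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')
movenet = model.signatures['serving_default']

# Lightning expects a 192x192 int32 RGB image with a leading batch dimension
frame = tf.zeros([1, 192, 192, 3], dtype=tf.int32)  # placeholder input frame
outputs = movenet(frame)
keypoints = outputs['output_0']  # shape [1, 1, 17, 3]: (y, x, score) per joint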
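
Likewise, a sketch (an assumption about the app, not part of this commit) of how the Flask service can restore BLIP and CLIP from the directories the build step saved them to, since from_pretrained accepts a local path:

from transformers import (BlipForConditionalGeneration, BlipProcessor,
                          CLIPModel, CLIPProcessor)

# Restore the captioning model and processor from the path baked into the image
blip_model = BlipForConditionalGeneration.from_pretrained('/models/blip')
blip_processor = BlipProcessor.from_pretrained('/models/blip')

# Restore CLIP and its processor the same way
clip_model = CLIPModel.from_pretrained('/models/clip')
clip_processor = CLIPProcessor.from_pretrained('/models/clip')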