JulianPhillips committed
Commit 8bc1c46 · verified · 1 parent: c78cb61

Update Dockerfile

Files changed (1)
  1. Dockerfile +7 -4
Dockerfile CHANGED
@@ -31,18 +31,21 @@ RUN pip install --no-cache-dir \
 
 # Set Hugging Face cache to a guaranteed writable directory
 ENV TRANSFORMERS_CACHE=/tmp/cache
-RUN mkdir -p /tmp/cache
+RUN mkdir -p /tmp/cache && chmod -R 777 /tmp/cache
 
 # Create directories for the models
 RUN mkdir -p /models/blip /models/clip
 
 # Python script to download models using tensorflow_hub and huggingface_hub
 RUN echo "import os\n\
-import tensorflow_hub as hub\n\n\n# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)\n\
-movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')\n\n\n# Download BLIP model and tokenizer using huggingface_hub\n\
+import tensorflow_hub as hub\n\n\
+# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)\n\
+movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')\n\n\
+# Download BLIP model and tokenizer using huggingface_hub\n\
 from transformers import BlipForConditionalGeneration, BlipProcessor\n\
 BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
-BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\n\n# Download CLIP model and processor using huggingface_hub\n\
+BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\n\
+# Download CLIP model and processor using huggingface_hub\n\
 from transformers import CLIPModel, CLIPProcessor\n\
 CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')\n\
 CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" > download_models.py
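
For reference, the new RUN echo line writes the following Python script to download_models.py once the shell expands the \n escapes. This assumes the image's /bin/sh echo interprets \n by default, as dash and BusyBox do (bash's builtin echo would need -e; printf would be more portable). The trailing backslashes are Dockerfile line continuations, so the whole quoted string is joined into a single echo argument:

import os
import tensorflow_hub as hub

# Download MoveNet model from TensorFlow Hub (loaded directly in app, not saved)
movenet_model = hub.load('https://tfhub.dev/google/movenet/singlepose/lightning/4')

# Download BLIP model and tokenizer using huggingface_hub
from transformers import BlipForConditionalGeneration, BlipProcessor
BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')
BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')

# Download CLIP model and processor using huggingface_hub
from transformers import CLIPModel, CLIPProcessor
CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')
CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')

The net effect of the commit is twofold: each generated Python statement now sits on its own Dockerfile line instead of being packed into \n\n\n runs, and chmod -R 777 /tmp/cache lets a non-root runtime user write to the Transformers cache. A hypothetical smoke test (not part of this commit) could confirm the saved copies load back from their local paths:

from transformers import BlipProcessor, CLIPModel
BlipProcessor.from_pretrained('/models/blip')
CLIPModel.from_pretrained('/models/clip')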