JulianPhillips committed on
Commit
cefb504
·
verified ·
1 Parent(s): 85612d7

Update Dockerfile

Browse files
Files changed (1) hide show
  1. Dockerfile +11 -8
Dockerfile CHANGED
@@ -23,7 +23,7 @@ ENV TRANSFORMERS_CACHE=/tmp/cache
23
  RUN mkdir -p /tmp/cache
24
 
25
  # Create directories for the models
26
- RUN mkdir -p /models/movenet /models/motionbert
27
 
28
  # Python script to download models using tensorflow_hub and huggingface_hub
29
  RUN echo "import os\n\
@@ -35,11 +35,15 @@ movenet_model_path = '/models/movenet/movenet_lightning'\n\
35
  os.makedirs(movenet_model_path, exist_ok=True)\n\
36
  movenet_model.save(movenet_model_path)\n\
37
  \n\
38
- # Download MotionBERT model and tokenizer using huggingface_hub\n\
39
- from huggingface_hub import hf_hub_download\n\
40
- from transformers import AutoModel, AutoTokenizer\n\
41
- AutoModel.from_pretrained('walterzhu/MotionBERT').save_pretrained('/models/motionbert')\n\
42
- AutoTokenizer.from_pretrained('walterzhu/MotionBERT').save_pretrained('/models/motionbert')" > download_models.py
 
 
 
 
43
 
44
  # Run the script to download models
45
  RUN python download_models.py
@@ -51,5 +55,4 @@ COPY app.py /app/app.py
51
  EXPOSE 7860
52
 
53
  # Run the Flask app
54
- CMD ["python", "/app/app.py"]
55
-
 
23
  RUN mkdir -p /tmp/cache
24
 
25
  # Create directories for the models
26
+ RUN mkdir -p /models/movenet /models/blip /models/clip
27
 
28
  # Python script to download models using tensorflow_hub and huggingface_hub
29
  RUN echo "import os\n\
 
35
  os.makedirs(movenet_model_path, exist_ok=True)\n\
36
  movenet_model.save(movenet_model_path)\n\
37
  \n\
38
+ # Download BLIP model and tokenizer using huggingface_hub\n\
39
+ from transformers import BlipForConditionalGeneration, BlipProcessor\n\
40
+ BlipForConditionalGeneration.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
41
+ BlipProcessor.from_pretrained('Salesforce/blip-image-captioning-large').save_pretrained('/models/blip')\n\
42
+ \n\
43
+ # Download CLIP model and processor using huggingface_hub\n\
44
+ from transformers import CLIPModel, CLIPProcessor\n\
45
+ CLIPModel.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')\n\
46
+ CLIPProcessor.from_pretrained('openai/clip-vit-large-patch14').save_pretrained('/models/clip')" > download_models.py
47
 
48
  # Run the script to download models
49
  RUN python download_models.py
 
55
  EXPOSE 7860
56
 
57
  # Run the Flask app
58
+ CMD ["python", "/app/app.py"]