ClemSummer committed
Commit 7d83d86 · 1 Parent(s): 86103f1

Fix: save CLIPProcessor in Docker build

Files changed (2)
  1. Dockerfile +1 -1
  2. vit_captioning/generate.py +1 -5
Dockerfile CHANGED
@@ -21,5 +21,5 @@ RUN mkdir -p /models/clip && \
     python3 -c "from transformers import CLIPModel; CLIPModel.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')"
 
 RUN python3 -c "from transformers import AutoTokenizer; AutoTokenizer.from_pretrained('bert-base-uncased').save_pretrained('/models/bert-tokenizer')"
-
+RUN python3 -c "from transformers import CLIPProcessor; CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32').save_pretrained('/models/clip')"
 CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
vit_captioning/generate.py CHANGED
@@ -33,15 +33,11 @@ class CaptionGenerator:
         if model_type == "ViTEncoder":
             self.encoder = ViTEncoder().to(self.device)
             self.encoder_dim = 768
-            #self.processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
-            #HF needs all model downloads to a special read-write cache dir
-            self.processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k", cache_dir="/tmp")
+            self.processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
         elif model_type == "CLIPEncoder":
             self.encoder = CLIPEncoder().to(self.device)
             self.encoder_dim = 512
             #self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
-            #HF needs all model downloads to a special read-write cache dir
-            #self.processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32", cache_dir="/tmp")
             self.processor = CLIPProcessor.from_pretrained("/models/clip")
         else:
             raise ValueError("Unknown model type")
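
For illustration, a hedged sketch of how the locally restored processor would typically be used at inference time; the image path and call pattern are assumptions, since the rest of CaptionGenerator is not shown in this diff:

from PIL import Image
from transformers import CLIPProcessor

# Load the processor from the directory baked into the image at build time,
# mirroring the CLIPEncoder branch above.
processor = CLIPProcessor.from_pretrained("/models/clip")

# Hypothetical input image; any RGB image works.
image = Image.open("example.jpg").convert("RGB")

# The processor resizes and normalizes the image into model-ready tensors.
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])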