nielsr (HF staff) committed
Commit 1b1e4db · Parent(s): f61d812

Update app.py

Files changed (1)
app.py  +4 −4
app.py CHANGED
@@ -5,15 +5,17 @@ import open_clip
 
 from huggingface_hub import hf_hub_download
 
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
 torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
 torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
 torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
 
 git_processor_large_coco = AutoProcessor.from_pretrained("microsoft/git-large-coco")
-git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco", device_map="auto")
+git_model_large_coco = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco").to(device)
 
 blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
-blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large", device_map="auto")
+blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)
 
 blip2_processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-6.7b")
 blip2_model_4_bit = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-6.7b", device_map="auto", load_in_4bit=True, torch_dtype=torch.float16)
@@ -21,8 +23,6 @@ blip2_model_4_bit = Blip2ForConditionalGeneration.from_pretrained("Salesforce/bl
 instructblip_processor = AutoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
 instructblip_model_4_bit = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b", device_map="auto", load_in_4bit=True, torch_dtype=torch.float16)
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
 def generate_caption(processor, model, image, tokenizer=None, use_float_16=False):
     inputs = processor(images=image, return_tensors="pt").to(device)