Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -168,14 +168,16 @@ pipe = load_and_prepare_model()
 #checkpoint = "microsoft/Phi-3.5-mini-instruct"
 checkpoint = "ford442/Phi-3.5-mini-instruct-bf16"
 #captioner = pipeline(model="ydshieh/vit-gpt2-coco-en",device='cuda:0', task="image-to-text")
-captioner_2 = pipeline(model="Salesforce/blip-image-captioning-base",device='cuda', task="image-to-text")
-
-model5 = Blip2ForConditionalGeneration.from_pretrained("ford442/blip2-image-to-text-bf16").to('cuda')
-processor5 = Blip2Processor.from_pretrained("ford442/blip2-image-to-text-bf16", device_map='cuda')
+#captioner_2 = pipeline(model="Salesforce/blip-image-captioning-base",device='cuda', task="image-to-text")
+captioner_2 = pipeline(model="ford442/blip-image-to-text-large-bf16",device='cuda', task="image-to-text")
+#model5 = Blip2ForConditionalGeneration.from_pretrained("ford442/blip2-image-to-text-bf16").to('cuda')
+#processor5 = Blip2Processor.from_pretrained("ford442/blip2-image-to-text-bf16", device_map='cuda')
 #txt_tokenizer = AutoTokenizer.from_pretrained(checkpoint, device_map='cuda', add_prefix_space=False)
 #txt_tokenizer.tokenizer_legacy=False
 #model = Phi3ForCausalLM.from_pretrained(checkpoint).to('cuda:0')
 #model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map='cuda') #.to('cuda')
+model5 = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b").to('cuda')
+processor5 = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
 
 ip_model = IPAdapterXL(pipe, local_folder, ip_ckpt, device)
 text_encoder_1=CLIPTextModel.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='text_encoder',token=True) #.to(device=device, dtype=torch.bfloat16)
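The commit swaps the BLIP-2 captioner for InstructBLIP (Salesforce/instructblip-vicuna-7b) and repoints captioner_2 at the bf16 BLIP-large checkpoint. A minimal sketch of how the new model5 / processor5 pair could be invoked for captioning follows; the helper name caption_with_instructblip, the prompt text, and the example file path are illustrative assumptions and are not part of app.py.

from PIL import Image
import torch
from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

# Illustrative helper, not part of app.py: runs the InstructBLIP checkpoint
# loaded in the diff (model5 / processor5) on a single image.
def caption_with_instructblip(image: Image.Image,
                              prompt: str = "Describe this image in detail.") -> str:
    model = InstructBlipForConditionalGeneration.from_pretrained(
        "Salesforce/instructblip-vicuna-7b").to('cuda')
    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
    inputs = processor(images=image, text=prompt, return_tensors="pt").to('cuda')
    with torch.no_grad():
        out = model.generate(**inputs, max_new_tokens=64)
    return processor.batch_decode(out, skip_special_tokens=True)[0].strip()

# Example usage (assumed local file):
# print(caption_with_instructblip(Image.open("example.jpg")))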