Spaces: Running on Zero
Update app.py
app.py CHANGED

This commit swaps the active image captioner: the captioner_3 pipeline (ford442/blip-image-to-text-large-bf16) is commented out, a captioner pipeline (ydshieh/vit-gpt2-coco-en) is loaded in its place, and the caption.append() calls in generate_30() are updated to match.
@@ -160,9 +160,9 @@ pipe = load_and_prepare_model()
 # text models
 #checkpoint = "microsoft/Phi-3.5-mini-instruct"
 checkpoint = "ford442/Phi-3.5-mini-instruct-bf16"
-
+captioner = pipeline(model="ydshieh/vit-gpt2-coco-en",device='cuda', task="image-to-text")
 #captioner_2 = pipeline(model="Salesforce/blip-image-captioning-base",device='cuda', task="image-to-text")
-captioner_3 = pipeline(model="ford442/blip-image-to-text-large-bf16",device='cuda', task="image-to-text")
+#captioner_3 = pipeline(model="ford442/blip-image-to-text-large-bf16",device='cuda', task="image-to-text")
 model5 = Blip2ForConditionalGeneration.from_pretrained("ford442/blip2-image-to-text-bf16").to('cuda')
 processor5 = Blip2Processor.from_pretrained("ford442/blip2-image-to-text-bf16")
 txt_tokenizer = AutoTokenizer.from_pretrained(checkpoint, add_prefix_space=False)
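For reference, a minimal sketch of how the newly loaded captioner pipeline and the BLIP-2 pair above are typically used in isolation. The test image path is hypothetical; an image-to-text pipeline returns a list of dicts with a generated_text key.

from PIL import Image
from transformers import pipeline, Blip2Processor, Blip2ForConditionalGeneration

# Same loads as in the diff above.
captioner = pipeline(model="ydshieh/vit-gpt2-coco-en", device='cuda', task="image-to-text")
model5 = Blip2ForConditionalGeneration.from_pretrained("ford442/blip2-image-to-text-bf16").to('cuda')
processor5 = Blip2Processor.from_pretrained("ford442/blip2-image-to-text-bf16")

img = Image.open("example.png").convert('RGB')  # hypothetical test image

# Pipeline path: returns e.g. [{'generated_text': 'a cat sitting on a couch'}]
print(captioner(img)[0]['generated_text'])

# BLIP-2 path: encode, generate, decode; inputs go to the same device as model5.
inputs = processor5(images=img, return_tensors="pt").to('cuda')
ids = model5.generate(**inputs, max_new_tokens=30)
print(processor5.batch_decode(ids, skip_special_tokens=True)[0].strip())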
@@ -352,43 +352,43 @@ def generate_30(
     sd_image_a.resize((height,width), Image.LANCZOS)
     caption=[]
     caption_2=[]
-
+    caption.append(captioner(sd_image_a))
     #caption.append(captioner_2(sd_image_a))
-    caption.append(captioner_3(sd_image_a))
+    #caption.append(captioner_3(sd_image_a))
     caption_2.append(captioning(sd_image_a))
     if latent_file_2 is not None: # Check if a latent file is provided
         sd_image_b = Image.open(latent_file_2.name).convert('RGB')
         sd_image_b.resize((height,width), Image.LANCZOS)
-
+        caption.append(captioner(sd_image_b))
         #caption.append(captioner_2(sd_image_b))
-        caption.append(captioner_3(sd_image_b))
+        #caption.append(captioner_3(sd_image_b))
         caption_2.append(captioning(sd_image_b))
     else:
         sd_image_b = None
     if latent_file_3 is not None: # Check if a latent file is provided
         sd_image_c = Image.open(latent_file_3.name).convert('RGB')
         sd_image_c.resize((height,width), Image.LANCZOS)
-
+        caption.append(captioner(sd_image_c))
         #caption.append(captioner_2(sd_image_c))
-        caption.append(captioner_3(sd_image_c))
+        #caption.append(captioner_3(sd_image_c))
         caption_2.append(captioning(sd_image_c))
     else:
         sd_image_c = None
     if latent_file_4 is not None: # Check if a latent file is provided
         sd_image_d = Image.open(latent_file_4.name).convert('RGB')
         sd_image_d.resize((height,width), Image.LANCZOS)
-
+        caption.append(captioner(sd_image_d))
         #caption.append(captioner_2(sd_image_d))
-        caption.append(captioner_3(sd_image_d))
+        #caption.append(captioner_3(sd_image_d))
         caption_2.append(captioning(sd_image_d))
     else:
         sd_image_d = None
     if latent_file_5 is not None: # Check if a latent file is provided
         sd_image_e = Image.open(latent_file_5.name).convert('RGB')
         sd_image_e.resize((height,width), Image.LANCZOS)
-
+        caption.append(captioner(sd_image_e))
         #caption.append(captioner_2(sd_image_e))
-        caption.append(captioner_3(sd_image_e))
+        #caption.append(captioner_3(sd_image_e))
         caption_2.append(captioning(sd_image_e))
     else:
         sd_image_e = None
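The five per-file blocks above differ only in variable names, and the bare Image.resize() calls discard their result: PIL's resize() returns a new image rather than resizing in place, and its size argument is (width, height), not (height, width). A hypothetical refactor (not part of this commit) that folds the repetition into one loop, reusing the app's captioner pipeline and captioning() helper:

from PIL import Image

def load_and_caption(latent_files, width, height):
    """Hypothetical helper: latent_files would be [latent_file, latent_file_2, ..., latent_file_5]."""
    images, caption, caption_2 = [], [], []
    for f in latent_files:
        if f is None:  # slot left empty in the UI
            images.append(None)
            continue
        img = Image.open(f.name).convert('RGB')
        img = img.resize((width, height), Image.LANCZOS)  # keep the resized copy
        caption.append(captioner(img))      # vit-gpt2 pipeline caption, as in the diff
        caption_2.append(captioning(img))   # BLIP-2 caption via the app's captioning() helper
        images.append(img)
    return images, caption, caption_2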