1inkusFace committed on
Commit
df4658b
·
verified ·
1 Parent(s): 3e47322

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -5
app.py CHANGED
@@ -109,12 +109,10 @@ def load_and_prepare_model():
109
  vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
110
  pipe = StableDiffusionXLPipeline.from_pretrained(
111
  'ford442/RealVisXL_V5.0_BF16',
112
- #'ford442/Juggernaut-XI-v11-fp32',
113
  # 'SG161222/RealVisXL_V5.0',
114
  #'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl',
115
  #torch_dtype=torch.bfloat16,
116
  add_watermarker=False,
117
- # custom_pipeline="lpw_stable_diffusion_xl",
118
  #use_safetensors=True,
119
  token=HF_TOKEN,
120
  text_encoder=None,
@@ -142,7 +140,8 @@ def load_and_prepare_model():
142
  #pipe.vae.do_resize=False
143
  #pipe.vae.do_rescale=False
144
  #pipe.vae.do_convert_rgb=True
145
- #pipe.vae.vae_scale_factor=8 #pipe.unet.set_default_attn_processor()
 
146
  pipe.vae.set_default_attn_processor()
147
  print(f'Pipeline: ')
148
  #print(f'_optional_components: {pipe._optional_components}')
@@ -163,9 +162,7 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
163
  captioner = pipeline(model="ydshieh/vit-gpt2-coco-en",device='cuda', task="image-to-text")
164
  captioner_2 = pipeline(model="Salesforce/blip-image-captioning-base",device='cuda', task="image-to-text")
165
  captioner_3 = pipeline(model="Salesforce/blip-image-captioning-large",device='cuda', task="image-to-text")
166
- #model5 = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-vicuna-7b").to(torch.bfloat16).to('cuda')
167
  model5 = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b-coco").to('cuda')
168
- #processor5 = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b").to(torch.bfloat16).to('cuda')
169
  processor5 = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b-coco")
170
  txt_tokenizer = AutoTokenizer.from_pretrained(checkpoint, add_prefix_space=False)
171
  txt_tokenizer.tokenizer_legacy=False
 
109
  vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False, low_cpu_mem_usage=False, torch_dtype=torch.float32, token=True) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
110
  pipe = StableDiffusionXLPipeline.from_pretrained(
111
  'ford442/RealVisXL_V5.0_BF16',
 
112
  # 'SG161222/RealVisXL_V5.0',
113
  #'John6666/uber-realistic-porn-merge-xl-urpmxl-v3-sdxl',
114
  #torch_dtype=torch.bfloat16,
115
  add_watermarker=False,
 
116
  #use_safetensors=True,
117
  token=HF_TOKEN,
118
  text_encoder=None,
 
140
  #pipe.vae.do_resize=False
141
  #pipe.vae.do_rescale=False
142
  #pipe.vae.do_convert_rgb=True
143
+ #pipe.vae.vae_scale_factor=8
144
+ #pipe.unet.set_default_attn_processor()
145
  pipe.vae.set_default_attn_processor()
146
  print(f'Pipeline: ')
147
  #print(f'_optional_components: {pipe._optional_components}')
 
162
  captioner = pipeline(model="ydshieh/vit-gpt2-coco-en",device='cuda', task="image-to-text")
163
  captioner_2 = pipeline(model="Salesforce/blip-image-captioning-base",device='cuda', task="image-to-text")
164
  captioner_3 = pipeline(model="Salesforce/blip-image-captioning-large",device='cuda', task="image-to-text")
 
165
  model5 = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b-coco").to('cuda')
 
166
  processor5 = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b-coco")
167
  txt_tokenizer = AutoTokenizer.from_pretrained(checkpoint, add_prefix_space=False)
168
  txt_tokenizer.tokenizer_legacy=False