ford442 committed on
Commit
7291b44
·
verified ·
1 Parent(s): 2752d4c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -5
app.py CHANGED
@@ -53,9 +53,11 @@ torch_dtype = torch.bfloat16
53
 
54
  checkpoint = "microsoft/Phi-3.5-mini-instruct"
55
  #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
56
- vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16).to(torch.device("cuda:0"))
 
57
 
58
- pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16", torch_dtype=torch.bfloat16).to(torch.device("cuda:0"))
 
59
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
60
  #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
61
 
@@ -144,7 +146,7 @@ def infer(
144
  outputs = model.generate(
145
  input_ids=input_ids,
146
  attention_mask=attention_mask,
147
- max_new_tokens=65,
148
  temperature=0.2,
149
  top_p=0.9,
150
  do_sample=True,
@@ -186,13 +188,16 @@ def infer(
186
  with torch.no_grad():
187
  sd_image = pipe(
188
  prompt=enhanced_prompt, # This conversion is fine
 
 
189
  negative_prompt=negative_prompt,
190
  guidance_scale=guidance_scale,
191
  num_inference_steps=num_inference_steps,
192
  width=width,
193
  height=height,
194
- latents=None,
195
- generator=generator
 
196
  ).images[0]
197
  print('-- got image --')
198
  image_path = f"sd35m_{seed}.png"
 
53
 
54
  checkpoint = "microsoft/Phi-3.5-mini-instruct"
55
  #vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
56
+ #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16).to(torch.device("cuda:0"))
57
+ vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16")
58
 
59
+ #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16", torch_dtype=torch.bfloat16).to(torch.device("cuda:0"))
60
+ pipe = StableDiffusion3Pipeline.from_pretrained("ford442/stable-diffusion-3.5-medium-bf16").to(torch.device("cuda:0"))
61
  #pipe = StableDiffusion3Pipeline.from_pretrained("ford442/RealVis_Medium_1.0b_bf16", torch_dtype=torch.bfloat16)
62
  #pipe = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3.5-medium", token=hftoken, torch_dtype=torch.float32, device_map='balanced')
63
 
 
146
  outputs = model.generate(
147
  input_ids=input_ids,
148
  attention_mask=attention_mask,
149
+ max_new_tokens=240,
150
  temperature=0.2,
151
  top_p=0.9,
152
  do_sample=True,
 
188
  with torch.no_grad():
189
  sd_image = pipe(
190
  prompt=enhanced_prompt, # This conversion is fine
191
+ prompt2=prompt,
192
+ prompt3=prompt,
193
  negative_prompt=negative_prompt,
194
  guidance_scale=guidance_scale,
195
  num_inference_steps=num_inference_steps,
196
  width=width,
197
  height=height,
198
+ # latents=None,
199
+ generator=generator,
200
+ target_size=(width,height)
201
  ).images[0]
202
  print('-- got image --')
203
  image_path = f"sd35m_{seed}.png"