ford442 committed
Commit 36815aa · verified · 1 Parent(s): 6136e66

Update app.py

Files changed (1):
  1. app.py +10 -14
app.py CHANGED
@@ -1,9 +1,7 @@
 import spaces
 import gradio as gr
 import numpy as np
-
 #import tensorrt as trt
-
 import random
 import torch
 from diffusers import StableDiffusion3Pipeline, AutoencoderKL, StableDiffusionXLImg2ImgPipeline, EulerAncestralDiscreteScheduler
@@ -30,7 +28,6 @@ torch.backends.cudnn.deterministic = False
 #torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
 #torch.backends.cuda.preferred_linalg_library="cusolver"
-
 torch.set_float32_matmul_precision("highest")
 
 hftoken = os.getenv("HF_AUTH_TOKEN")
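Note: the unchanged context in this hunk is the app's global PyTorch precision setup; torch.set_float32_matmul_precision("highest") keeps float32 matrix multiplications in full FP32 instead of TF32. A minimal sketch of the related low-level switches (the two TF32 flags are illustrative, not lines from app.py):

    import torch

    # "highest" disables the TF32 fast path for float32 matmuls;
    # "high" or "medium" would allow it on Ampere and newer GPUs.
    torch.set_float32_matmul_precision("highest")

    # Explicit per-backend switches (matmul vs. cuDNN convolutions):
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False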
@@ -81,7 +78,6 @@ refiner.scheduler=EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.
 #refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config, beta_schedule="scaled_linear")
 #refiner.scheduler = EulerAncestralDiscreteScheduler.from_config(refiner.scheduler.config)
 
-
 tokenizer = AutoTokenizer.from_pretrained(checkpoint, add_prefix_space=False, device_map='balanced')
 tokenizer.tokenizer_legacy=False
 model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map='balanced')
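Note: the tokenizer and causal LM loaded here are presumably what produces the enhanced_prompt seen in the later hunks. A hypothetical sketch of that prompt-rewriting pattern (the instruction text and generation settings are assumptions, not code from app.py):

    from transformers import AutoTokenizer, AutoModelForCausalLM

    def enhance_prompt(prompt: str, tokenizer, model, max_new_tokens: int = 64) -> str:
        # Ask the causal LM to expand the user's prompt (the wording is hypothetical).
        inputs = tokenizer("Expand this image prompt: " + prompt, return_tensors="pt").to(model.device)
        out = model.generate(**inputs, max_new_tokens=max_new_tokens, do_sample=True, temperature=0.7)
        return tokenizer.decode(out[0], skip_special_tokens=True)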
@@ -190,8 +186,8 @@ def infer(
         #sd_image_b = pipe.vae.encode(sd_image_a.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
         print("-- using latent file --")
         print('-- generating image --')
-        with torch.no_grad():
-            sd_image = pipe(
+        #with torch.no_grad():
+        sd_image = pipe(
             prompt=enhanced_prompt, # This conversion is fine
             negative_prompt=negative_prompt,
             guidance_scale=guidance_scale,
@@ -200,11 +196,11 @@ def infer(
             height=height,
             latents=sd_image_a,
             generator=generator
-            ).images[0]
+        ).images[0]
     else:
         print('-- generating image --')
-        with torch.no_grad():
-            sd_image = pipe(
+        #with torch.no_grad():
+        sd_image = pipe(
             prompt=enhanced_prompt, # This conversion is fine
             prompt_2=enhanced_prompt_2,
             prompt_3=prompt,
@@ -215,7 +211,7 @@ def infer(
             height=height,
             # latents=None,
             generator=generator,
-            ).images[0]
+        ).images[0]
     print('-- got image --')
     image_path = f"sd35m_{seed}.png"
     sd_image.save(image_path,optimize=False,compress_level=0)
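Note: the three hunks above comment out the torch.no_grad() wrappers and dedent the pipe(...) calls by one level. Diffusers pipelines decorate __call__ with @torch.no_grad(), so the denoising loop still runs without autograd either way; if an explicit guard is ever reinstated, torch.inference_mode() is a common alternative. A minimal sketch reusing the names from the diff (the prompt strings are placeholders, not app.py values):

    import torch

    # inference_mode() disables autograd tracking for everything in the block.
    with torch.inference_mode():
        sd_image = pipe(
            prompt="placeholder prompt",
            negative_prompt="placeholder negative prompt",
            guidance_scale=guidance_scale,
            width=width,
            height=height,
            generator=generator,
        ).images[0]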
@@ -223,8 +219,8 @@ def infer(
     # Convert the generated image to a tensor
     generated_image_tensor = torch.tensor([np.array(sd_image).transpose(2, 0, 1)]).to('cuda') / 255.0
     # Encode the generated image into latents
-    with torch.no_grad():
-        generated_latents = pipe.vae.encode(generated_image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
+    #with torch.no_grad():
+    generated_latents = pipe.vae.encode(generated_image_tensor.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)
     latent_path = f"sd35m_{seed}.pt"
     # Save the latents to a .pt file
     torch.save(generated_latents, latent_path)
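Note: unlike the pipeline call, pipe.vae.encode() is not itself wrapped in no-grad by diffusers, so with the guard commented out this encode will generally record autograd state and hold extra memory. A hedged sketch of the same encode-and-save round trip with an explicit guard, reusing names from the diff; the final reload line is an assumption about how the saved .pt latents are consumed:

    import numpy as np
    import torch

    with torch.no_grad():
        # PIL image -> NCHW float tensor in [0, 1], matching the layout used above.
        img = torch.tensor([np.array(sd_image).transpose(2, 0, 1)]).to('cuda') / 255.0
        # 0.18215 mirrors the app's code; it is the SD1.x/SDXL scaling factor, while the
        # pipeline's own value is exposed as pipe.vae.config.scaling_factor.
        latents = pipe.vae.encode(img.to(torch.bfloat16)).latent_dist.sample().mul_(0.18215)

    torch.save(latents, f"sd35m_{seed}.pt")
    # A later run could reload the file and pass latents=... back into the pipeline call.
    sd_image_a = torch.load(f"sd35m_{seed}.pt")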
@@ -252,9 +248,9 @@ examples = [
 css = """
 #col-container {
     margin: 0 auto;
-    max-width: 768px;
+    max-width: 640px;
 }
-body {
+body{
     background-color: blue;
 }
 """
 
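Note: the CSS hunk narrows #col-container from 768px to 640px and drops the space in the body selector (both spellings are valid CSS). The string is presumably handed to the Gradio Blocks constructor; a minimal sketch of that wiring, with an illustrative layout rather than the app's actual components:

    import gradio as gr

    css = """
    #col-container {
        margin: 0 auto;
        max-width: 640px;
    }
    body{
        background-color: blue;
    }
    """

    with gr.Blocks(css=css) as demo:
        with gr.Column(elem_id="col-container"):
            prompt = gr.Textbox(label="Prompt")
            run = gr.Button("Run")

    demo.launch()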