ford442 committed (verified)
Commit 7a7c822 · Parent(s): 94e8a16

Update app.py

Files changed (1): app.py (+18 -9)
app.py CHANGED
@@ -148,7 +148,7 @@ def load_and_prepare_model(model_id):
     #pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
 
-    pipe.to(device=device, dtype=torch.bfloat16)
+    pipe.to(device=torch.device('cuda'), dtype=torch.bfloat16)
     #pipe.to(torch.bfloat16)
 
     #apply_hidiffusion(pipe)
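This hunk replaces the module-level `device` variable with an explicit `torch.device('cuda')`, so the pipeline always lands on the GPU in bfloat16 regardless of what `device` resolves to. A minimal sketch of the same placement pattern, assuming a diffusers pipeline (loading 'ford442/RealVisXL_V5.0_BF16' as the checkpoint is an assumption taken from the commented-out scheduler line above):

import torch
from diffusers import DiffusionPipeline

# Sketch only: explicit device/dtype placement, mirroring the hunk above.
# The checkpoint id is assumed from the commented-out scheduler line;
# substitute whatever model_id load_and_prepare_model actually receives.
pipe = DiffusionPipeline.from_pretrained('ford442/RealVisXL_V5.0_BF16',
                                         torch_dtype=torch.bfloat16)
pipe.to(device=torch.device('cuda'), dtype=torch.bfloat16)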
@@ -251,7 +251,9 @@ def generate_30(
     #if juggernaut == True:
     #    pipe.vae=vaeX
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device='cuda').manual_seed(seed)
+    generator = torch.Generator(device=torch.device('cuda')).manual_seed(seed)
+    latent_size = int(height / 8)
+    latent_input = randn_tensor(shape=(1,4,latent_size,latent_size),generator=generator, device=torch.device('cuda'), dtype=torch.bfloat16)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
         "prompt": [prompt],
@@ -265,11 +267,12 @@ def generate_30(
         "generator": generator,
         # "timesteps": sampling_schedule,
         "output_type": "pil",
+        "latents": latent_input
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    pipe.scheduler.set_timesteps(num_inference_steps,device)
+    pipe.scheduler.set_timesteps(num_inference_steps,torch.device('cuda'))
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
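With `latents` supplied, the pipeline skips sampling fresh noise and only scales the given tensor by the scheduler's `init_noise_sigma`, so two calls with the same seed should reproduce the same image. The explicit `set_timesteps(..., torch.device('cuda'))` call keeps the timestep tensors on the GPU, though the pipeline normally calls `set_timesteps` itself during `__call__`. A quick reproducibility check, assuming `pipe` and the hypothetical `make_latents` from the sketches above:

# Same seed, same latents: the two outputs should match pixel-for-pixel.
gen_a, lat_a = make_latents(1024, 1024, seed=42)
img_a = pipe(prompt="a photo of a cat", latents=lat_a, generator=gen_a).images[0]

gen_b, lat_b = make_latents(1024, 1024, seed=42)
img_b = pipe(prompt="a photo of a cat", latents=lat_b, generator=gen_b).images[0]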
@@ -302,7 +305,7 @@ def generate_60(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = True
+    torch.backends.cudnn.benchmark = False
     torch.cuda.empty_cache()
     gc.collect()
     global models
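The other change, repeated in `generate_60` and `generate_90`, flips `cudnn.benchmark` off. With benchmarking on, cuDNN times several convolution algorithms the first time it sees each input shape, which pays off only when shapes stay fixed; an app that accepts arbitrary resolutions re-triggers that autotuning constantly, so disabling it is the safer default:

import torch

# benchmark=True autotunes conv kernels per input shape: fast for a fixed
# resolution, but a costly first pass for every new one. With user-chosen
# sizes, heuristic kernel selection (False) avoids repeated re-tuning.
torch.backends.cudnn.benchmark = False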
@@ -310,7 +313,9 @@ def generate_60(
     #if juggernaut == True:
     #    pipe.vae=vaeX
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device='cuda').manual_seed(seed)
+    generator = torch.Generator(device=torch.device('cuda')).manual_seed(seed)
+    latent_size = int(height / 8)
+    latent_input = randn_tensor(shape=(1,4,latent_size,latent_size),generator=generator, device=torch.device('cuda'), dtype=torch.bfloat16)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
         "prompt": [prompt],
@@ -324,11 +329,12 @@ def generate_60(
         "generator": generator,
         # "timesteps": sampling_schedule,
         "output_type": "pil",
+        "latents": latent_input
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    pipe.scheduler.set_timesteps(num_inference_steps,device)
+    pipe.scheduler.set_timesteps(num_inference_steps,torch.device('cuda'))
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
@@ -361,7 +367,7 @@ def generate_90(
     denoise: float = 0.3,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    torch.backends.cudnn.benchmark = True
+    torch.backends.cudnn.benchmark = False
     torch.cuda.empty_cache()
     gc.collect()
     global models
@@ -369,7 +375,9 @@ def generate_90(
     #if juggernaut == True:
     #    pipe.vae=vaeX
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device='cuda').manual_seed(seed)
+    generator = torch.Generator(device=torch.device('cuda')).manual_seed(seed)
+    latent_size = int(height / 8)
+    latent_input = randn_tensor(shape=(1,4,latent_size,latent_size),generator=generator, device=torch.device('cuda'), dtype=torch.bfloat16)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
         "prompt": [prompt],
@@ -383,11 +391,12 @@ def generate_90(
         "generator": generator,
         # "timesteps": sampling_schedule,
         "output_type": "pil",
+        "latents": latent_input
     }
     if use_resolution_binning:
         options["use_resolution_binning"] = True
     images = []
-    pipe.scheduler.set_timesteps(num_inference_steps,device)
+    pipe.scheduler.set_timesteps(num_inference_steps,torch.device('cuda'))
     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
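The same seed/latents setup is now duplicated verbatim in `generate_30`, `generate_60`, and `generate_90`. A hypothetical consolidation that would keep the three entry points in sync (sketch only; `randomize_seed_fn` and `randn_tensor` are the names already used in app.py):

import gc
import torch
from diffusers.utils.torch_utils import randn_tensor

def prepare_generation(height, seed, randomize_seed):
    # Hypothetical shared setup for the duplicated per-function preamble;
    # randomize_seed_fn is assumed to be the app.py helper seen in the diff.
    torch.backends.cudnn.benchmark = False
    torch.cuda.empty_cache()
    gc.collect()
    seed = int(randomize_seed_fn(seed, randomize_seed))
    generator = torch.Generator(device=torch.device('cuda')).manual_seed(seed)
    latent_size = int(height / 8)  # square outputs only, as in the diff
    latents = randn_tensor(shape=(1, 4, latent_size, latent_size),
                           generator=generator,
                           device=torch.device('cuda'),
                           dtype=torch.bfloat16)
    return seed, generator, latents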
 