ford442 committed on
Commit
150c0c2
·
verified ·
1 Parent(s): 833caee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -24
app.py CHANGED
@@ -113,8 +113,8 @@ def load_and_prepare_model():
113
  #unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16',subfolder='unet').to(torch.bfloat16) # ,use_safetensors=True FAILS
114
  #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler',beta_schedule="scaled_linear", steps_offset=1,timestep_spacing="trailing"))
115
  #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler', steps_offset=1,timestep_spacing="trailing")
116
- #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
117
- sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
118
  #sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
119
  #pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0").to(torch.bfloat16)
120
  #pipeX = StableDiffusionXLPipeline.from_pretrained("ford442/Juggernaut-XI-v11-fp32",use_safetensors=True)
@@ -130,7 +130,6 @@ def load_and_prepare_model():
130
  token=HF_TOKEN,
131
  # custom_pipeline="lpw_stable_diffusion_xl",
132
  #use_safetensors=True,
133
- # use_auth_token=HF_TOKEN,
134
  # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
135
  # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
136
  # vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16",repo_type='model',safety_checker=None),
@@ -232,7 +231,7 @@ def randomize_seed_fn() -> int:
232
  seed = random.randint(0, MAX_SEED)
233
  return seed
234
 
235
- def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise):
236
  filename= f'tst_A_{timestamp}.txt'
237
  with open(filename, "w") as f:
238
  f.write(f"Realvis 5.0 (Tester A) \n")
@@ -240,7 +239,6 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise):
240
  f.write(f"Prompt: {prompt} \n")
241
  f.write(f"Steps: {num_inference_steps} \n")
242
  f.write(f"Guidance Scale: {guidance_scale} \n")
243
- f.write(f"Denoise Strength: {denoise} \n")
244
  f.write(f"SPACE SETUP: \n")
245
  f.write(f"Use Model Dtype: no \n")
246
  f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
@@ -263,7 +261,6 @@ def generate_30(
263
  guidance_scale: float = 4,
264
  num_inference_steps: int = 125,
265
  use_resolution_binning: bool = True,
266
- denoise: float = 0.3,
267
  lora_scale: float = 0.5,
268
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
269
  ):
@@ -276,7 +273,6 @@ def generate_30(
276
  "prompt": [prompt],
277
  "negative_prompt": [negative_prompt],
278
  "negative_prompt_2": [neg_prompt_2],
279
- "strength": denoise,
280
  "width": width,
281
  "height": height,
282
  "guidance_scale": guidance_scale,
@@ -290,7 +286,7 @@ def generate_30(
290
  images = []
291
  pipe.scheduler.set_timesteps(num_inference_steps,device)
292
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
293
- uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
294
  batch_options = options.copy()
295
  rv_image = pipe(**batch_options).images[0]
296
  sd_image_path = f"rv50_A_{timestamp}.png"
@@ -321,7 +317,6 @@ def generate_60(
321
  guidance_scale: float = 4,
322
  num_inference_steps: int = 250,
323
  use_resolution_binning: bool = True,
324
- denoise: float = 0.3,
325
  lora_scale: float = 0.5,
326
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
327
  ):
@@ -334,7 +329,6 @@ def generate_60(
334
  "prompt": [prompt],
335
  "negative_prompt": [negative_prompt],
336
  "negative_prompt_2": [neg_prompt_2],
337
- "strength": denoise,
338
  "width": width,
339
  "height": height,
340
  "guidance_scale": guidance_scale,
@@ -348,7 +342,7 @@ def generate_60(
348
  images = []
349
  pipe.scheduler.set_timesteps(num_inference_steps,device)
350
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
351
- uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
352
  batch_options = options.copy()
353
  rv_image = pipe(**batch_options).images[0]
354
  sd_image_path = f"rv50_A_{timestamp}.png"
@@ -379,7 +373,6 @@ def generate_90(
379
  guidance_scale: float = 4,
380
  num_inference_steps: int = 250,
381
  use_resolution_binning: bool = True,
382
- denoise: float = 0.3,
383
  lora_scale: float = 0.5,
384
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
385
  ):
@@ -392,7 +385,6 @@ def generate_90(
392
  "prompt": [prompt],
393
  "negative_prompt": [negative_prompt],
394
  "negative_prompt_2": [neg_prompt_2],
395
- "strength": denoise,
396
  "width": width,
397
  "height": height,
398
  "guidance_scale": guidance_scale,
@@ -406,7 +398,7 @@ def generate_90(
406
  images = []
407
  pipe.scheduler.set_timesteps(num_inference_steps,device)
408
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
409
- uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
410
  batch_options = options.copy()
411
  rv_image = pipe(**batch_options).images[0]
412
  sd_image_path = f"rv50_A_{timestamp}.png"
@@ -494,13 +486,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
494
  value="('deformed', 'distorted', 'disfigured':1.3),'not photorealistic':1.5, 'poorly drawn', 'bad anatomy', 'wrong anatomy', 'extra limb', 'missing limb', 'floating limbs', 'poorly drawn hands', 'poorly drawn feet', 'poorly drawn face':1.3, 'out of frame', 'extra limbs', 'bad anatomy', 'bad art', 'beginner', 'distorted face','amateur'",
495
  visible=True,
496
  )
497
- denoise = gr.Slider(
498
- label="Denoising Strength",
499
- minimum=0.0,
500
- maximum=1.0,
501
- step=0.01,
502
- value=0.3,
503
- )
504
  lora_scale = gr.Slider(
505
  label="LORA Scale (Skin)",
506
  minimum=0.0,
@@ -568,7 +553,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
568
  height,
569
  guidance_scale,
570
  num_inference_steps,
571
- denoise,
572
  lora_scale,
573
  ],
574
  outputs=[result],
@@ -590,7 +574,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
590
  height,
591
  guidance_scale,
592
  num_inference_steps,
593
- denoise,
594
  lora_scale,
595
  ],
596
  outputs=[result],
@@ -612,7 +595,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
612
  height,
613
  guidance_scale,
614
  num_inference_steps,
615
- denoise,
616
  lora_scale,
617
  ],
618
  outputs=[result],
 
113
  #unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16',subfolder='unet').to(torch.bfloat16) # ,use_safetensors=True FAILS
114
  #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler',beta_schedule="scaled_linear", steps_offset=1,timestep_spacing="trailing"))
115
  #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler', steps_offset=1,timestep_spacing="trailing")
116
+ sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
117
+ #sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
118
  #sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
119
  #pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0").to(torch.bfloat16)
120
  #pipeX = StableDiffusionXLPipeline.from_pretrained("ford442/Juggernaut-XI-v11-fp32",use_safetensors=True)
 
130
  token=HF_TOKEN,
131
  # custom_pipeline="lpw_stable_diffusion_xl",
132
  #use_safetensors=True,
 
133
  # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
134
  # vae=AutoencoderKL.from_pretrained("stabilityai/sdxl-vae",repo_type='model',safety_checker=None, torch_dtype=torch.float32),
135
  # vae=AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16",repo_type='model',safety_checker=None),
 
231
  seed = random.randint(0, MAX_SEED)
232
  return seed
233
 
234
+ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
235
  filename= f'tst_A_{timestamp}.txt'
236
  with open(filename, "w") as f:
237
  f.write(f"Realvis 5.0 (Tester A) \n")
 
239
  f.write(f"Prompt: {prompt} \n")
240
  f.write(f"Steps: {num_inference_steps} \n")
241
  f.write(f"Guidance Scale: {guidance_scale} \n")
 
242
  f.write(f"SPACE SETUP: \n")
243
  f.write(f"Use Model Dtype: no \n")
244
  f.write(f"Model Scheduler: Euler_a all_custom before cuda \n")
 
261
  guidance_scale: float = 4,
262
  num_inference_steps: int = 125,
263
  use_resolution_binning: bool = True,
 
264
  lora_scale: float = 0.5,
265
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
266
  ):
 
273
  "prompt": [prompt],
274
  "negative_prompt": [negative_prompt],
275
  "negative_prompt_2": [neg_prompt_2],
 
276
  "width": width,
277
  "height": height,
278
  "guidance_scale": guidance_scale,
 
286
  images = []
287
  pipe.scheduler.set_timesteps(num_inference_steps,device)
288
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
289
+ uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
290
  batch_options = options.copy()
291
  rv_image = pipe(**batch_options).images[0]
292
  sd_image_path = f"rv50_A_{timestamp}.png"
 
317
  guidance_scale: float = 4,
318
  num_inference_steps: int = 250,
319
  use_resolution_binning: bool = True,
 
320
  lora_scale: float = 0.5,
321
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
322
  ):
 
329
  "prompt": [prompt],
330
  "negative_prompt": [negative_prompt],
331
  "negative_prompt_2": [neg_prompt_2],
 
332
  "width": width,
333
  "height": height,
334
  "guidance_scale": guidance_scale,
 
342
  images = []
343
  pipe.scheduler.set_timesteps(num_inference_steps,device)
344
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
345
+ uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
346
  batch_options = options.copy()
347
  rv_image = pipe(**batch_options).images[0]
348
  sd_image_path = f"rv50_A_{timestamp}.png"
 
373
  guidance_scale: float = 4,
374
  num_inference_steps: int = 250,
375
  use_resolution_binning: bool = True,
 
376
  lora_scale: float = 0.5,
377
  progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
378
  ):
 
385
  "prompt": [prompt],
386
  "negative_prompt": [negative_prompt],
387
  "negative_prompt_2": [neg_prompt_2],
 
388
  "width": width,
389
  "height": height,
390
  "guidance_scale": guidance_scale,
 
398
  images = []
399
  pipe.scheduler.set_timesteps(num_inference_steps,device)
400
  timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
401
+ uploadNote(prompt,num_inference_steps,guidance_scale,timestamp)
402
  batch_options = options.copy()
403
  rv_image = pipe(**batch_options).images[0]
404
  sd_image_path = f"rv50_A_{timestamp}.png"
 
486
  value="('deformed', 'distorted', 'disfigured':1.3),'not photorealistic':1.5, 'poorly drawn', 'bad anatomy', 'wrong anatomy', 'extra limb', 'missing limb', 'floating limbs', 'poorly drawn hands', 'poorly drawn feet', 'poorly drawn face':1.3, 'out of frame', 'extra limbs', 'bad anatomy', 'bad art', 'beginner', 'distorted face','amateur'",
487
  visible=True,
488
  )
 
 
 
 
 
 
 
489
  lora_scale = gr.Slider(
490
  label="LORA Scale (Skin)",
491
  minimum=0.0,
 
553
  height,
554
  guidance_scale,
555
  num_inference_steps,
 
556
  lora_scale,
557
  ],
558
  outputs=[result],
 
574
  height,
575
  guidance_scale,
576
  num_inference_steps,
 
577
  lora_scale,
578
  ],
579
  outputs=[result],
 
595
  height,
596
  guidance_scale,
597
  num_inference_steps,
 
598
  lora_scale,
599
  ],
600
  outputs=[result],