Update app.py
app.py CHANGED
```diff
@@ -113,7 +113,7 @@ def load_and_prepare_model(model_id):
     pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0")
     pipe = StableDiffusionXLPipeline.from_pretrained(
         model_id,
-
+        torch_dtype=torch.bfloat16,
         add_watermarker=False,
         use_safetensors=True,
         # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
@@ -126,9 +126,9 @@ def load_and_prepare_model(model_id):
 
     pipe.unet = pipeX.unet
     #pipe.to(dtype=torch.bfloat16)
-
-
-    pipe.to(device, torch.bfloat16)
+    pipe.to(device)
+    pipe.to(torch.bfloat16)
+    #pipe.to(device, torch.bfloat16)
     del pipeX
     #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="dpmsolver++")
     #sched = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++")
```
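Taken together, the first two hunks load the checkpoint directly in bfloat16 (`torch_dtype=torch.bfloat16`) and split the old combined `pipe.to(device, torch.bfloat16)` into separate device and dtype moves. A minimal sketch of that loading pattern against the public diffusers API follows; the CPU fallback and variable names are illustrative assumptions, not code from the app:

```python
import torch
from diffusers import StableDiffusionXLPipeline

# Illustrative device selection; the app defines its own `device`.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the weights in bfloat16 so they are never materialized in fp32.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V5.0",   # the SDXL checkpoint referenced in the diff
    torch_dtype=torch.bfloat16,
    use_safetensors=True,
    add_watermarker=False,
)

# Two-step move, mirroring the diff: device first, then dtype.
pipe.to(device)
pipe.to(torch.bfloat16)
# The combined form the diff comments out is also valid in recent diffusers:
# pipe.to(device, torch.bfloat16)
```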
```diff
@@ -209,12 +209,11 @@ def generate_30(
         f.write(f"Steps: {num_inference_steps} \n")
         f.write(f"Guidance Scale: {guidance_scale} \n")
         f.write(f"SPACE SETUP: \n")
-        f.write(f"Use Safetensors:
-        f.write(f"Use Model Dtype:
+        f.write(f"Use Safetensors: True \n")
+        f.write(f"Use Model Dtype: yes \n")
         f.write(f"Model Scheduler: Euler_a custom \n")
         f.write(f"Model VAE: stabilityai/sdxl-vae no dtype \n")
         f.write(f"Model UNET: stabilityai no dtype \n")
-        f.write(f"Model to cuda/dtype \n")
     upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
@@ -276,12 +275,11 @@ def generate_60(
         f.write(f"Steps: {num_inference_steps} \n")
         f.write(f"Guidance Scale: {guidance_scale} \n")
         f.write(f"SPACE SETUP: \n")
-        f.write(f"Use Safetensors:
-        f.write(f"Use Model Dtype:
+        f.write(f"Use Safetensors: True \n")
+        f.write(f"Use Model Dtype: yes \n")
         f.write(f"Model Scheduler: Euler_a custom \n")
         f.write(f"Model VAE: stabilityai/sdxl-vae no dtype \n")
         f.write(f"Model UNET: stabilityai no dtype \n")
-        f.write(f"Model to cuda/dtype \n")
     upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
@@ -343,12 +341,11 @@ def generate_90(
         f.write(f"Steps: {num_inference_steps} \n")
         f.write(f"Guidance Scale: {guidance_scale} \n")
         f.write(f"SPACE SETUP: \n")
-        f.write(f"Use Safetensors:
-        f.write(f"Use Model Dtype:
+        f.write(f"Use Safetensors: True \n")
+        f.write(f"Use Model Dtype: yes \n")
         f.write(f"Model Scheduler: Euler_a custom \n")
         f.write(f"Model VAE: stabilityai/sdxl-vae no dtype \n")
         f.write(f"Model UNET: stabilityai no dtype \n")
-        f.write(f"Model to cuda/dtype \n")
     upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
```
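The same edit is applied identically in `generate_30`, `generate_60`, and `generate_90`: the two truncated `f.write` lines become fixed "Use Safetensors: True" / "Use Model Dtype: yes" entries, and the "Model to cuda/dtype" line is dropped. Since the block is now triplicated, one option is to factor it into a helper; the sketch below uses a hypothetical `write_setup_header` function, assumes the log file is opened in append mode, and only restates what the diff already writes (the app's own `upload_to_ftp` would still be called afterwards):

```python
def write_setup_header(filename: str, num_inference_steps: int, guidance_scale: float) -> None:
    """Write the generation-settings header that each generate_* function logs before upload."""
    with open(filename, "a", encoding="utf-8") as f:
        f.write(f"Steps: {num_inference_steps} \n")
        f.write(f"Guidance Scale: {guidance_scale} \n")
        f.write("SPACE SETUP: \n")
        f.write("Use Safetensors: True \n")
        f.write("Use Model Dtype: yes \n")
        f.write("Model Scheduler: Euler_a custom \n")
        f.write("Model VAE: stabilityai/sdxl-vae no dtype \n")
        f.write("Model UNET: stabilityai no dtype \n")
```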