Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -113,7 +113,7 @@ def load_and_prepare_model(model_id):
     pipeX = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0",use_safetensors=True)
     pipe = StableDiffusionXLPipeline.from_pretrained(
         model_id,
-
+        # torch_dtype=torch.bfloat16,
         # add_watermarker=False,
         use_safetensors=True,
         # vae=AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",repo_type='model',safety_checker=None),
@@ -217,7 +217,7 @@ def generate_30(
     f.write(f"Guidance Scale: {guidance_scale} \n")
     f.write(f"SPACE SETUP: \n")
     f.write(f"Use Safetensors: True \n")
-    f.write(f"Use Model Dtype:
+    f.write(f"Use Model Dtype: no \n")
     f.write(f"Model Scheduler: Euler_a custom before cuda \n")
     f.write(f"Model VAE: stabilityai/sdxl-vae before cuda to bfloat after cuda \n")
     f.write(f"Model UNET: stabilityai before cuda to bfloat after cuda \n")
@@ -282,11 +282,11 @@ def generate_60(
     f.write(f"Steps: {num_inference_steps} \n")
     f.write(f"Guidance Scale: {guidance_scale} \n")
     f.write(f"SPACE SETUP: \n")
-    f.write(f"Use Safetensors:
+    f.write(f"Use Safetensors: True \n")
     f.write(f"Use Model Dtype: no \n")
-    f.write(f"Model Scheduler: Euler_a custom \n")
-    f.write(f"Model VAE: stabilityai/sdxl-vae
-    f.write(f"Model UNET: stabilityai after cuda \n")
+    f.write(f"Model Scheduler: Euler_a custom before cuda \n")
+    f.write(f"Model VAE: stabilityai/sdxl-vae before cuda to bfloat after cuda \n")
+    f.write(f"Model UNET: stabilityai before cuda to bfloat after cuda \n")
     upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
@@ -348,11 +348,11 @@ def generate_90(
     f.write(f"Steps: {num_inference_steps} \n")
     f.write(f"Guidance Scale: {guidance_scale} \n")
     f.write(f"SPACE SETUP: \n")
-    f.write(f"Use Safetensors:
+    f.write(f"Use Safetensors: True \n")
     f.write(f"Use Model Dtype: no \n")
-    f.write(f"Model Scheduler: Euler_a custom \n")
-    f.write(f"Model VAE: stabilityai/sdxl-vae
-    f.write(f"Model UNET: stabilityai after cuda \n")
+    f.write(f"Model Scheduler: Euler_a custom before cuda \n")
+    f.write(f"Model VAE: stabilityai/sdxl-vae before cuda to bfloat after cuda \n")
+    f.write(f"Model UNET: stabilityai before cuda to bfloat after cuda \n")
     upload_to_ftp(filename)
     for i in range(0, num_images, BATCH_SIZE):
         batch_options = options.copy()
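
The only change to executable code is in load_and_prepare_model: a new, still commented-out torch_dtype=torch.bfloat16 kwarg on the second from_pretrained call. If that comment were ever enabled, the load would look roughly like the sketch below. This is a minimal sketch using the RealVisXL checkpoint named in the diff, not the Space's actual code; the explicit .to("cuda") is an assumption based on the "before cuda / after cuda" wording in the log lines.

import torch
from diffusers import StableDiffusionXLPipeline

# Minimal sketch: load the SDXL checkpoint in bfloat16 from safetensors,
# then move the pipeline to the GPU. This mirrors the commented-out
# torch_dtype kwarg added in this commit; it is not the Space's exact code.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "SG161222/RealVisXL_V5.0",
    torch_dtype=torch.bfloat16,   # the kwarg this commit adds (commented out in app.py)
    use_safetensors=True,
    add_watermarker=False,        # also present, commented out, in app.py
)
pipe.to("cuda")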
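
The remaining hunks only bring the "SPACE SETUP" block that generate_60 and generate_90 write into their log file in line with what generate_30 already logs: safetensors on, no explicit model dtype, the custom Euler_a scheduler set before the cuda move, and the VAE and UNET cast to bfloat after it. One way to keep the three functions from drifting apart again would be a shared helper along these lines (hypothetical; write_space_setup does not exist in app.py):

def write_space_setup(f):
    # Hypothetical helper: write the shared "SPACE SETUP" block once so that
    # generate_30 / generate_60 / generate_90 always log identical setup text.
    f.write("SPACE SETUP: \n")
    f.write("Use Safetensors: True \n")
    f.write("Use Model Dtype: no \n")
    f.write("Model Scheduler: Euler_a custom before cuda \n")
    f.write("Model VAE: stabilityai/sdxl-vae before cuda to bfloat after cuda \n")
    f.write("Model UNET: stabilityai before cuda to bfloat after cuda \n")

Each generate_* function would then call write_space_setup(f) in place of its repeated f.write lines, just before upload_to_ftp(filename).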