ford442 committed on
Commit
6311068
·
1 Parent(s): c5e56cd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -12
app.py CHANGED
@@ -18,10 +18,10 @@ from typing import Tuple
18
  from transformers import AutoTokenizer, AutoModelForCausalLM
19
  import paramiko
20
 
21
- os.system("chmod +x ./cusparselt.sh")
22
- os.system("./cusparselt.sh")
23
- os.system("chmod +x ./cudnn.sh")
24
- os.system("./cudnn.sh")
25
 
26
  torch.backends.cuda.matmul.allow_tf32 = False
27
  torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -109,14 +109,12 @@ def load_and_prepare_model(model_id):
109
  "ford442/RealVisXL_V5.0_BF16": torch.bfloat16,
110
  }
111
  dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to bfloat16 if not found
112
- vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16,safety_checker=None).to('cuda')
113
  pipe = StableDiffusionXLPipeline.from_pretrained(
114
  model_id,
115
  torch_dtype=torch.bfloat16,
116
- use_safetensors=True,
117
  add_watermarker=False,
118
  vae=vae,
119
- safety_checker=None,
120
  )
121
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
122
  pipe.to(torch.bfloat16)
@@ -161,8 +159,8 @@ def generate(
161
  seed: int = 1,
162
  width: int = 768,
163
  height: int = 768,
164
- guidance_scale: float = 5,
165
- num_inference_steps: int = 300,
166
  randomize_seed: bool = False,
167
  use_resolution_binning: bool = True,
168
  num_images: int = 1,
@@ -212,7 +210,7 @@ def generate_cpu(
212
  width: int = 768,
213
  height: int = 768,
214
  guidance_scale: float = 5,
215
- num_inference_steps: int = 225,
216
  randomize_seed: bool = False,
217
  use_resolution_binning: bool = True,
218
  num_images: int = 1,
@@ -359,14 +357,14 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
359
  minimum=0.1,
360
  maximum=6,
361
  step=0.1,
362
- value=5.0,
363
  )
364
  num_inference_steps = gr.Slider(
365
  label="Number of inference steps",
366
  minimum=10,
367
  maximum=1000,
368
  step=10,
369
- value=300,
370
  )
371
 
372
  gr.Examples(
 
18
  from transformers import AutoTokenizer, AutoModelForCausalLM
19
  import paramiko
20
 
21
+ #os.system("chmod +x ./cusparselt.sh")
22
+ #os.system("./cusparselt.sh")
23
+ #os.system("chmod +x ./cudnn.sh")
24
+ #os.system("./cudnn.sh")
25
 
26
  torch.backends.cuda.matmul.allow_tf32 = False
27
  torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 
109
  "ford442/RealVisXL_V5.0_BF16": torch.bfloat16,
110
  }
111
  dtype = model_dtypes.get(model_id, torch.bfloat16) # Default to bfloat16 if not found
112
+ vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", torch_dtype=torch.bfloat16,safety_checker=None).to(torch.bfloat16).to('cuda')
113
  pipe = StableDiffusionXLPipeline.from_pretrained(
114
  model_id,
115
  torch_dtype=torch.bfloat16,
 
116
  add_watermarker=False,
117
  vae=vae,
 
118
  )
119
  pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
120
  pipe.to(torch.bfloat16)
 
159
  seed: int = 1,
160
  width: int = 768,
161
  height: int = 768,
162
+ guidance_scale: float = 4.2,
163
+ num_inference_steps: int = 250,
164
  randomize_seed: bool = False,
165
  use_resolution_binning: bool = True,
166
  num_images: int = 1,
 
210
  width: int = 768,
211
  height: int = 768,
212
  guidance_scale: float = 5,
213
+ num_inference_steps: int = 250,
214
  randomize_seed: bool = False,
215
  use_resolution_binning: bool = True,
216
  num_images: int = 1,
 
357
  minimum=0.1,
358
  maximum=6,
359
  step=0.1,
360
+ value=4.2,
361
  )
362
  num_inference_steps = gr.Slider(
363
  label="Number of inference steps",
364
  minimum=10,
365
  maximum=1000,
366
  step=10,
367
+ value=250,
368
  )
369
 
370
  gr.Examples(