Update app.py
app.py CHANGED
@@ -128,7 +128,6 @@ def load_and_prepare_model():
         #torch_dtype=torch.bfloat16,
         add_watermarker=False,
         token=True,
-        add_watermarker=False,
         text_encoder=None,
         text_encoder_2=None,
         tokenizer=None,
@@ -251,6 +250,9 @@ def generate_30(
     #pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
     generator = torch.Generator(device='cuda').manual_seed(seed)
+
+    pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
         "prompt": [prompt],
@@ -306,6 +308,9 @@ def generate_60(
     #pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
     generator = torch.Generator(device='cuda').manual_seed(seed)
+
+    pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -358,6 +363,9 @@ def generate_90(
     #pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
     generator = torch.Generator(device='cuda').manual_seed(seed)
+
+    pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
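Taken together, the diff loads the SDXL pipeline without its two text encoders and then attaches externally held, bfloat16 copies of them at the top of each generate_* function. Below is a minimal runnable sketch of that pattern, assuming a diffusers StableDiffusionXLPipeline; the checkpoint id and the generate() wrapper are illustrative stand-ins, not the app's actual values. The diff also passes tokenizer=None at load time (presumably restored elsewhere in app.py); the sketch keeps the tokenizers loaded so prompt encoding works as-is.

import torch
from diffusers import StableDiffusionXLPipeline
from transformers import CLIPTextModel, CLIPTextModelWithProjection

device = "cuda"
repo = "stabilityai/stable-diffusion-xl-base-1.0"  # hypothetical checkpoint

# Load the pipeline without its text encoders, mirroring the
# text_encoder=None / text_encoder_2=None kwargs in the diff.
pipe = StableDiffusionXLPipeline.from_pretrained(
    repo,
    add_watermarker=False,
    text_encoder=None,
    text_encoder_2=None,
).to(device)

# Hold the two SDXL text encoders outside the pipeline; in SDXL,
# text_encoder is a CLIPTextModel and text_encoder_2 is a
# CLIPTextModelWithProjection.
text_encoder = CLIPTextModel.from_pretrained(repo, subfolder="text_encoder")
text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(repo, subfolder="text_encoder_2")

def generate(prompt: str, seed: int = 0):
    generator = torch.Generator(device=device).manual_seed(seed)
    # Attach the encoders in bfloat16 right before inference, as the diff
    # does in generate_30/generate_60/generate_90.
    pipe.text_encoder = text_encoder.to(device=device, dtype=torch.bfloat16)
    pipe.text_encoder_2 = text_encoder_2.to(device=device, dtype=torch.bfloat16)
    return pipe(prompt=prompt, generator=generator).images[0]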
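The commit doesn't state why the encoders are attached per call rather than once at load time, but two common motivations fit this pattern: it keeps the encoders off the GPU until a generation request actually needs them, and on hardware that only attaches a GPU inside the generation call (e.g., ZeroGPU Spaces) the .to(device='cuda', ...) moves can only succeed there. The first hunk is independent housekeeping: it drops a second add_watermarker=False from the call in load_and_prepare_model(); a repeated keyword argument in the same Python call is a SyntaxError, so that removal is a straightforward bug fix.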