Update app.py
app.py CHANGED
@@ -6,6 +6,12 @@
 # copies of the Software, and to permit persons to whom the Software is
 import spaces
 import os
+os.putenv('TORCH_LINALG_PREFER_CUSOLVER','1')
+os.putenv('PYTORCH_CUDA_ALLOC_CONF','max_split_size_mb:128')
+os.environ["SAFETENSORS_FAST_GPU"] = "1"
+os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
+
+
 import random
 import uuid
 import gradio as gr
@@ -29,12 +35,10 @@ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
 torch.backends.cudnn.benchmark = False
-
-
+torch.backends.cuda.preferred_blas_library="cublas"
+torch.backends.cuda.preferred_linalg_library="cusolver"
 
 torch.set_float32_matmul_precision("highest")
-os.putenv("HF_HUB_ENABLE_HF_TRANSFER","1")
-os.environ["SAFETENSORS_FAST_GPU"] = "1"
 
 FTP_HOST = "1ink.us"
 FTP_USER = "ford442"
@@ -249,8 +253,8 @@ def generate_30(
     #torch.set_default_device('cuda')
     #pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
-    generator = torch.Generator(device='
-
+    generator = torch.Generator(device='cpu').manual_seed(seed)
+    torch.set_float32_matmul_precision("highest")
     pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
@@ -307,8 +311,8 @@ def generate_60(
     #torch.set_default_device('cuda')
     #pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
-    generator = torch.Generator(device='
-
+    generator = torch.Generator(device='cpu').manual_seed(seed)
+    torch.set_float32_matmul_precision("highest")
     pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
     options = {
@@ -362,8 +366,8 @@ def generate_90(
     #torch.set_default_device('cuda')
     #pipe.set_adapters(["skin"], adapter_weights=[lora_scale])
     seed = int(randomize_seed_fn())
-    generator = torch.Generator(device='
-
+    generator = torch.Generator(device='cpu').manual_seed(seed)
+    torch.set_float32_matmul_precision("highest")
     pipe.text_encoder=text_encoder.to(device=device, dtype=torch.bfloat16)
     pipe.text_encoder_2=text_encoder_2.to(device=device, dtype=torch.bfloat16)
     options = {
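The switch to a CPU-device generator in generate_30, generate_60, and generate_90 is what pins down reproducibility: a torch.Generator on 'cpu' yields the same noise for a given seed no matter which GPU (or driver version) the Space wakes up on, whereas a 'cuda' generator's stream can differ across devices. A minimal sketch of the pattern, assuming a diffusers-style pipeline bound to `pipe` (the name follows this Space's code; `seeded_call` is hypothetical):

import torch

def seeded_call(pipe, prompt, seed):
    # A CPU generator is device-independent: the same seed gives the
    # same initial latents on any machine the Space lands on.
    generator = torch.Generator(device='cpu').manual_seed(seed)
    # diffusers pipelines accept `generator` and draw their initial
    # noise from it instead of the global RNG.
    return pipe(prompt, generator=generator)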
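One caveat worth flagging on the new environment block: os.putenv writes to the process environment for child processes but does not update os.environ, so Python code that reads os.environ afterwards (huggingface_hub checks HF_HUB_ENABLE_HF_TRANSFER that way, and the CUDA caching allocator reads PYTORCH_CUDA_ALLOC_CONF once, when it first initializes) will not see those values. Similarly, in recent PyTorch releases torch.backends.cuda.preferred_blas_library and preferred_linalg_library are functions, so assigning a string to them shadows the function rather than selecting a backend. A hedged sketch of the same configuration in the form those consumers actually observe, assuming PyTorch 2.x:

import os

# os.environ assignment (unlike os.putenv) is visible to any Python
# code that inspects the environment after this line runs.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'  # read at first CUDA allocation
os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'                    # checked by huggingface_hub
os.environ['SAFETENSORS_FAST_GPU'] = '1'

import torch

# Call-style knobs in PyTorch 2.x, not attributes to assign:
torch.backends.cuda.preferred_blas_library(backend="cublas")
torch.backends.cuda.preferred_linalg_library(backend="cusolver")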