Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -18,17 +18,19 @@ from typing import Tuple
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import paramiko
 
-os.system("chmod +x ./
-os.system("./
+os.system("chmod +x ./cusparselt.sh")
+os.system("./cusparselt.sh")
+os.system("chmod +x ./cudnn.sh")
+os.system("./cudnn.sh")
 
-torch.backends.cuda.matmul.allow_tf32 =
-torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction =
+torch.backends.cuda.matmul.allow_tf32 = False
+torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
 torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
-torch.backends.cudnn.allow_tf32 =
+torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
-torch.backends.cudnn.benchmark =
+torch.backends.cudnn.benchmark = False
 torch.backends.cuda.preferred_blas_library="cublas"
-torch.backends.cuda.preferred_linalg_library="
+torch.backends.cuda.preferred_linalg_library="cusolver"
 
 torch.set_float32_matmul_precision("highest")
 
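For context: this hunk turns off every TF32 and reduced-precision fast path and pins the BLAS/linalg backends, trading speed on Ampere-class GPUs for run-to-run reproducibility. A minimal standalone sketch of the same configuration (all of these are real PyTorch 2.x settings; the rest of the app is omitted here):

```python
import torch

# Force matmuls to run in full IEEE float32: no TF32 tensor cores and no
# bf16/fp16 reduced-precision accumulations, so results are repeatable.
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
torch.backends.cudnn.allow_tf32 = False

# benchmark=False stops cuDNN from autotuning convolution algorithms, which
# can otherwise pick different kernels (and slightly different outputs)
# between runs.
torch.backends.cudnn.benchmark = False

# Pin the math backends explicitly: cuBLAS for matmul, cuSOLVER for linalg.
torch.backends.cuda.preferred_blas_library = "cublas"
torch.backends.cuda.preferred_linalg_library = "cusolver"

# "highest" keeps float32 matmul precision at true float32 (no TF32 downcast).
torch.set_float32_matmul_precision("highest")
```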
@@ -123,7 +125,7 @@ def load_and_prepare_model(model_id):
 # Preload and compile both models
 models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}
 
-MAX_SEED = np.iinfo(np.
+MAX_SEED = np.iinfo(np.int64).max
 
 def upload_to_ftp(filename):
     try:
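`np.iinfo` reads the limits of an integer dtype, so `MAX_SEED` becomes 2**63 - 1, the full range `torch.Generator.manual_seed` accepts. A sketch of how such a constant is typically used; `randomize_seed_fn`'s real body is not in this diff, so the helper below is a hypothetical stand-in:

```python
import random
import numpy as np

# Largest 64-bit signed integer: 9223372036854775807 (2**63 - 1).
MAX_SEED = np.iinfo(np.int64).max

def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Hypothetical stand-in for the app's helper: draw a fresh seed from
    # the full int64 range when the user asked for a random one.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
```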
@@ -168,7 +170,7 @@ def generate(
     global models
     pipe = models[model_choice]
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device=
+    generator = torch.Generator(device='cpu').manual_seed(seed)
 
     prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
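The seeded generator is created on the CPU rather than on the GPU; a CPU-resident generator yields the same noise sequence regardless of which device the pipeline runs on. A sketch under the assumption that `pipe` is a diffusers-style pipeline taking a `generator` keyword (the actual `pipe(...)` call sits outside this hunk):

```python
import torch

seed = 123456  # e.g. the value returned by randomize_seed_fn above

# A CPU-resident generator: the same seed reproduces the same latent noise
# whether the model itself runs on CUDA or CPU.
generator = torch.Generator(device='cpu').manual_seed(seed)

# Hypothetical call shape, assuming a diffusers-style pipeline:
# images = pipe(prompt, negative_prompt=negative_prompt,
#               generator=generator).images
```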
@@ -220,7 +222,7 @@ def generate_cpu(
     pipe.to("cpu")
 
     seed = int(randomize_seed_fn(seed, randomize_seed))
-    generator = torch.Generator(device=
+    generator = torch.Generator(device='cpu').manual_seed(seed)
 
     prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
 
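The generate_cpu hunk makes the identical change. Since both paths now seed a generator pinned to device='cpu', a given seed should produce the same initial noise in the GPU and CPU code paths, which is presumably the point of making the two hunks match.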