Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,4 +1,5 @@
 import spaces
+import os
 import gradio as gr
 import torch
 from PIL import Image
@@ -39,7 +40,19 @@ pipe_dev.to("cuda")
 
 # Flux.1-krea
 dtype = torch.bfloat16
-device = "cuda" if torch.cuda.is_available() else "cpu"
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+# ---- CUDA Check ----
+print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
+print("torch.__version__ =", torch.__version__)
+print("torch.version.cuda =", torch.version.cuda)
+print("cuda available:", torch.cuda.is_available())
+print("cuda device count:", torch.cuda.device_count())
+if torch.cuda.is_available():
+    print("current device:", torch.cuda.current_device())
+    print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
+
+# --- Model Loading ---
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
 good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", subfolder="vae", torch_dtype=dtype).to(device)
 pipe_krea = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-Krea-dev", torch_dtype=dtype, vae=taef1).to(device)
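
The device-selection and CUDA-diagnostics pattern added by this commit can be exercised on its own, outside the Space. Below is a minimal sketch assuming only that PyTorch is installed; the model loading and Gradio parts of app.py are omitted, and the final tensor check is an illustrative addition, not part of the commit.

# Minimal standalone sketch of the device selection + CUDA check above.
import os
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
print("torch.__version__ =", torch.__version__)
print("torch.version.cuda =", torch.version.cuda)
print("cuda available:", torch.cuda.is_available())
print("cuda device count:", torch.cuda.device_count())
if torch.cuda.is_available():
    idx = torch.cuda.current_device()
    print("current device:", idx)
    print("device name:", torch.cuda.get_device_name(idx))

# Anything created on `device` lands on the GPU when one is visible
# and falls back to CPU otherwise.
x = torch.zeros(1, device=device)
print("tensor device:", x.device)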