Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -19,6 +19,7 @@ from safetensors.torch import load_file
 from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
 import gc
 from gradio_client import Client
+from live_preview_helpers import calculate_shift, retrieve_timesteps, flux_pipe_call_that_returns_an_iterable_of_images
 
 cache_path = path.join(path.dirname(path.abspath(__file__)), "models")
 os.environ["TRANSFORMERS_CACHE"] = cache_path
@@ -27,8 +28,10 @@ os.environ["HF_HOME"] = cache_path
 
 
 torch.backends.cuda.matmul.allow_tf32 = True
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
+good_vae = AutoencoderKL.from_pretrained("ostris/Flex.1-alpha", subfolder="vae", torch_dtype=dtype).to(device)
 
-pipe =
+pipe = DiffusionPipeline.from_pretrained("AlekseyCalvin/FlexAlpha_Scaled_Soonr", vae=taef1, ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16)
 pipe.to(device="cuda", dtype=torch.bfloat16)
 
 model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
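The two added VAEs split the decoding work: taef1 (a tiny distilled FLUX autoencoder) becomes the pipeline's built-in VAE so intermediate latents can be decoded cheaply for live previews, while the full Flex.1-alpha AutoencoderKL is held aside for the final, higher-fidelity decode. Note that these lines reference dtype and device, which this hunk does not define; presumably they are set earlier in app.py (the pipe.to(...) call on the next line suggests "cuda" and torch.bfloat16). A minimal sketch of the trade-off, with latent un-scaling simplified relative to what a real preview helper does:

import torch

# FLUX-family latents are 16-channel; 64x64 latents decode to a 512x512 image.
# taef1 and good_vae are the autoencoders loaded in the hunk above.
latents = torch.randn(1, 16, 64, 64, dtype=torch.bfloat16, device="cuda")

with torch.no_grad():
    preview = taef1.decode(latents).sample   # tiny VAE: fast, rough preview
    final = good_vae.decode(latents).sample  # full VAE: slow, high quality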
@@ -41,6 +44,8 @@ pipe.text_encoder = clip_model.text_model
 pipe.tokenizer_max_length = 248
 pipe.text_encoder.dtype = torch.bfloat16
 
+pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
+
 # Load LoRAs from JSON file
 with open('loras.json', 'r') as f:
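The __get__ call is Python's descriptor protocol at work: calling a plain function's __get__ with an instance returns a method bound to that instance, so self inside flux_pipe_call_that_returns_an_iterable_of_images refers to this pipe. A self-contained illustration of the same binding trick:

class Greeter:
    def __init__(self, name):
        self.name = name

def shout(self):
    # A plain module-level function; self is supplied by the binding below.
    return self.name.upper() + "!"

g = Greeter("flux")
g.shout = shout.__get__(g)   # functions are descriptors: this binds g as self
print(g.shout())             # -> FLUX!

Binding on the instance (rather than monkey-patching the class) keeps the patch local to this one pipeline object.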
@@ -100,6 +105,7 @@ def generate_image(prompt, trigger_word, steps, seed, cfg_scale, width, height,
         width=width,
         height=height,
         generator=generator,
+        good_vae=good_vae,
         joint_attention_kwargs={"scale": lora_scale},
     ).images[0]
     return image
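This hunk threads good_vae into the existing pipe(...) call inside generate_image. Worth noting: a stock FluxPipeline __call__ does not normally accept a good_vae keyword; in similar live-preview Spaces that argument belongs to the generator-style flux_pipe_call_that_returns_an_iterable_of_images bound above, which decodes intermediate steps with the tiny pipeline VAE and only the final image with the full VAE. If the Space's runtime error stems from this line, routing the call through the bound helper would be the usual fix. A hedged sketch of how such a generator is typically consumed in a Gradio handler (the handler name and exact keywords are illustrative, not from this commit):

def run_with_previews(prompt, steps, width, height, lora_scale, generator):
    # Hypothetical streaming handler: each yielded image updates the UI.
    for image in pipe.flux_pipe_call_that_returns_an_iterable_of_images(
        prompt=prompt,
        num_inference_steps=steps,
        width=width,
        height=height,
        generator=generator,
        good_vae=good_vae,  # full VAE reserved for the final yield
        joint_attention_kwargs={"scale": lora_scale},
    ):
        yield image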