AlekseyCalvin committed
Commit 36805f3 (verified) · 1 parent: e00df9e

Update app.py

Files changed (1): app.py (+14, -6)
app.py CHANGED
@@ -14,6 +14,8 @@ import time
 from huggingface_hub import hf_hub_download
 from diffusers import FluxTransformer2DModel, FluxPipeline
 from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
+from diffusers.models.transformers import FluxTransformer2DModel
+from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
 import safetensors.torch
 from safetensors.torch import load_file
 from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
@@ -26,13 +28,20 @@ os.environ["TRANSFORMERS_CACHE"] = cache_path
 os.environ["HF_HUB_CACHE"] = cache_path
 os.environ["HF_HOME"] = cache_path
 
+torch.set_float32_matmul_precision("medium")
+
+dtype = torch.bfloat16
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 torch.backends.cuda.matmul.allow_tf32 = True
-taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
-good_vae = AutoencoderKL.from_pretrained("ostris/Flex.1-alpha", subfolder="vae", torch_dtype=dtype).to(device)
+taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to(device)
+good_vae = AutoencoderKL.from_pretrained("ostris/Flex.1-alpha", subfolder="vae", torch_dtype=torch.bfloat16).to(device)
 
-pipe = DiffusionPipeline.from_pretrained("AlekseyCalvin/FlexAlpha_Scaled_Soonr", vae=taef1, ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16)
-pipe.to(device="cuda", dtype=torch.bfloat16)
+dtype = torch.bfloat16
+base_model = "AlekseyCalvin/HSTcolor_FlexSoonr"
+pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
+#pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
+torch.cuda.empty_cache()
 
 model_id = ("zer0int/LongCLIP-GmP-ViT-L-14")
 config = CLIPConfig.from_pretrained(model_id)
@@ -44,8 +53,7 @@ pipe.text_encoder = clip_model.text_model
 pipe.tokenizer_max_length = 248
 pipe.text_encoder.dtype = torch.bfloat16
 
-pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
-
+pipe.vae = AutoencoderKL.from_pretrained("ostris/Flex.1-alpha", subfolder="vae", torch_dtype=dtype).to(device)
 
 # Load LoRAs from JSON file
 with open('loras.json', 'r') as f:
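
Taken together, the hunks above leave app.py building its models in the sequence below. This is an editorial consolidation, not part of the commit: the committed bare bfloat16 name is read as torch.bfloat16, and the duplicate AutoencoderKL.from_pretrained call is folded into a reuse of good_vae.

import torch
from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL

# Precision/device setup introduced by this commit.
torch.set_float32_matmul_precision("medium")
torch.backends.cuda.matmul.allow_tf32 = True
dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Tiny VAE for fast intermediate decodes; full Flex.1-alpha VAE for final quality.
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
good_vae = AutoencoderKL.from_pretrained(
    "ostris/Flex.1-alpha", subfolder="vae", torch_dtype=dtype
).to(device)

# New base checkpoint; the full VAE is reattached after text-encoder setup.
base_model = "AlekseyCalvin/HSTcolor_FlexSoonr"
pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to(device)
pipe.vae = good_vae
torch.cuda.empty_cache()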
 
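The unchanged context lines around the last hunk show the app grafting a LongCLIP text tower into the pipeline (pipe.text_encoder = clip_model.text_model, pipe.tokenizer_max_length = 248). The construction of clip_model sits outside this diff; what follows is a minimal sketch of that pattern, continuing from the sketch above and assuming the 248-position config override and matching tokenizer that zer0int/LongCLIP-GmP-ViT-L-14 is built around.

import torch
from transformers import CLIPModel, CLIPConfig, CLIPTokenizer

model_id = "zer0int/LongCLIP-GmP-ViT-L-14"

# LongCLIP extends CLIP's 77-token text window to 248 positions (assumption:
# the hosted config may already carry this value, making the override a no-op).
config = CLIPConfig.from_pretrained(model_id)
config.text_config.max_position_embeddings = 248

clip_model = CLIPModel.from_pretrained(
    model_id, config=config, ignore_mismatched_sizes=True, torch_dtype=torch.bfloat16
)

# Graft the long-context text tower into the pipeline, with a matching
# tokenizer (assumption: the repo ships CLIP tokenizer files).
pipe.text_encoder = clip_model.text_model
pipe.tokenizer = CLIPTokenizer.from_pretrained(model_id)
pipe.tokenizer_max_length = 248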