Update app2.py
app2.py CHANGED
@@ -5,8 +5,12 @@ import logging
 import torch
 from PIL import Image
 from os import path
+from torchvision import transforms
+from dataclasses import dataclass
+import math
+from typing import Callable
 import spaces
-from diffusers import DiffusionPipeline, AutoencoderTiny
+from diffusers import DiffusionPipeline, AutoencoderTiny, AutoencoderKL
 from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
 from transformers import CLIPModel, CLIPProcessor, CLIPTextModel, CLIPTokenizer, CLIPConfig, T5EncoderModel, T5Tokenizer
 from diffusers.models.transformers import FluxTransformer2DModel
@@ -14,6 +18,7 @@ import copy
 import random
 import time
 import safetensors.torch
+from tqdm import tqdm
 from safetensors.torch import load_file
 from huggingface_hub import HfFileSystem, ModelCard
 from huggingface_hub import login, hf_hub_download
@@ -34,7 +39,7 @@ with open('loras.json', 'r') as f:
 # Initialize the base model
 dtype = torch.bfloat16
 device = "cuda" if torch.cuda.is_available() else "cpu"
-base_model = "AlekseyCalvin/
+base_model = "AlekseyCalvin/Artsy_Lite_Flux_v1_by_jurdn_Diffusers"
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=dtype).to("cuda")
 #pipe.vae = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.float16).to("cuda")
 torch.cuda.empty_cache()
@@ -50,12 +55,13 @@ if clipmodel == "norm":
 maxtokens = 77
 clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
 clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
+t5 = HFEmbedder("DeepFloyd/t5-v1_1-xxl", max_length=512, torch_dtype=torch.bfloat16).to(device)
 
 pipe.tokenizer = clip_processor.tokenizer
 pipe.text_encoder = clip_model.text_model
 pipe.tokenizer_max_length = maxtokens
 pipe.text_encoder.dtype = torch.bfloat16
-
+pipe.text_encoder_2 = t5.text_model
 
 MAX_SEED = 2**32-1
 
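Note on the T5 hookup introduced in the last hunk: HFEmbedder is called on the new line 58 but is not defined in any of the hunks shown, so app2.py presumably defines or imports it elsewhere. For orientation, here is a minimal sketch of the class, assuming it follows the identically named conditioner wrapper in the FLUX reference code (black-forest-labs/flux), whose constructor matches this call signature; it is an assumption about the app's class, not code quoted from this repository.

import torch
from torch import nn
from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5Tokenizer

# Sketch only: assumed to match the FLUX reference conditioner, not taken from app2.py.
class HFEmbedder(nn.Module):
    def __init__(self, version: str, max_length: int, **hf_kwargs):
        super().__init__()
        # CLIP checkpoints live under the "openai/" namespace; anything else is treated as T5.
        self.is_clip = version.startswith("openai")
        self.max_length = max_length
        self.output_key = "pooler_output" if self.is_clip else "last_hidden_state"
        if self.is_clip:
            self.tokenizer = CLIPTokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module = CLIPTextModel.from_pretrained(version, **hf_kwargs)
        else:
            self.tokenizer = T5Tokenizer.from_pretrained(version, max_length=max_length)
            self.hf_module = T5EncoderModel.from_pretrained(version, **hf_kwargs)
        # Inference only: freeze the wrapped encoder.
        self.hf_module = self.hf_module.eval().requires_grad_(False)

    def forward(self, text: list[str]) -> torch.Tensor:
        # Tokenize to a fixed max_length (512 for the T5 instance above) and encode.
        batch = self.tokenizer(
            text,
            truncation=True,
            max_length=self.max_length,
            padding="max_length",
            return_tensors="pt",
        )
        outputs = self.hf_module(
            input_ids=batch["input_ids"].to(self.hf_module.device),
            attention_mask=None,
            output_hidden_states=False,
        )
        return outputs[self.output_key]

One caveat worth flagging on this commit: the added line pipe.text_encoder_2 = t5.text_model assumes the embedder exposes a text_model attribute. In the reference wrapper sketched above the wrapped encoder is exposed as hf_module, and T5EncoderModel itself has no text_model attribute, so if the app's HFEmbedder matches the reference, the assignment would need t5.hf_module instead. Note also that the torch_dtype=torch.bfloat16 argument in the new line 58 is consistent with the **hf_kwargs pass-through in the sketch.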