Staticaliza committed on
Commit
498164e
·
verified ·
1 Parent(s): 61a7fe1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -14
app.py CHANGED
@@ -9,7 +9,7 @@ import uuid
9
  import json
10
  import os
11
 
12
- from diffusers import StableDiffusionXLPipeline, StableDiffusion3Pipeline, SD3Transformer2DModel
13
  from huggingface_hub import snapshot_download
14
  from peft import PeftModel
15
  from PIL import Image
@@ -32,10 +32,6 @@ DEFAULT_WIDTH = 1024
32
 
33
  headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HF_TOKEN}" }
34
 
35
- repo_large_path = snapshot_download(repo_id="stabilityai/stable-diffusion-3-medium", revision="refs/pr/26", token=HF_TOKEN)
36
- repo_large_transformer_path = SD3Transformer2DModel.from_pretrained(repo_large_path, subfolder="transformer", torch_dtype=torch.float16)
37
- repo_large_transformer = PeftModel.from_pretrained(repo_large_transformer_path, "jasperai/flash-sd3")
38
-
39
  css = '''
40
  .gradio-container{max-width: 560px !important}
41
  h1{text-align:center}
@@ -44,21 +40,36 @@ footer {
44
  }
45
  '''
46
 
 
47
  repo_default = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
48
  repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
49
  repo_default.set_adapters(["base"], adapter_weights=[0.7])
 
50
 
 
51
  repo_pixel = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
52
  repo_pixel.load_lora_weights("artificialguybr/PixelArtRedmond", adapter_name="base")
53
  repo_pixel.load_lora_weights("nerijs/pixel-art-xl", adapter_name="base2")
54
  repo_pixel.set_adapters(["base", "base2"], adapter_weights=[1, 1])
 
 
 
 
 
 
 
 
 
 
55
 
56
  repo_customs = {
57
- "Default": repo_default,
58
- "Realistic": StableDiffusionXLPipeline.from_pretrained("ehristoforu/Visionix-alpha", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
59
- "Anime": StableDiffusionXLPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
60
- "Pixel": repo_pixel,
61
- "Large": StableDiffusion3Pipeline.from_pretrained(repo_large_path, transformer=repo_large_transformer, torch_dtype=torch.float16, use_safetensors=True),
 
 
62
  }
63
 
64
  # Functions
@@ -120,13 +131,13 @@ def generate(input=DEFAULT_INPUT, filter_input="", negative_input=DEFAULT_NEGATI
120
  parameters = {
121
  "prompt": input,
122
  "negative_prompt": filter_input + negative_input,
123
- "height": height,
124
- "width": width,
125
  "num_inference_steps": steps,
126
  "guidance_scale": guidance,
127
- "num_images_per_prompt": number,
128
  "generator": torch.Generator().manual_seed(seed),
129
- "output_type":"pil",
130
  }
131
 
132
  images = repo(**parameters).images
 
9
  import json
10
  import os
11
 
12
+ from diffusers import StableDiffusionXLPipeline, StableDiffusion3Pipeline, SD3Transformer2DModel, FlashFlowMatchEulerDiscreteScheduler
13
  from huggingface_hub import snapshot_download
14
  from peft import PeftModel
15
  from PIL import Image
 
32
 
33
  headers = {"Content-Type": "application/json", "Authorization": f"Bearer {HF_TOKEN}" }
34
 
 
 
 
 
35
  css = '''
36
  .gradio-container{max-width: 560px !important}
37
  h1{text-align:center}
 
40
  }
41
  '''
42
 
43
+ '''
44
  repo_default = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
45
  repo_default.load_lora_weights("ehristoforu/dalle-3-xl-v2", adapter_name="base")
46
  repo_default.set_adapters(["base"], adapter_weights=[0.7])
47
+ '''
48
 
49
+ '''
50
  repo_pixel = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False)
51
  repo_pixel.load_lora_weights("artificialguybr/PixelArtRedmond", adapter_name="base")
52
  repo_pixel.load_lora_weights("nerijs/pixel-art-xl", adapter_name="base2")
53
  repo_pixel.set_adapters(["base", "base2"], adapter_weights=[1, 1])
54
+ '''
55
+
56
+ repo_large_path = snapshot_download(repo_id="stabilityai/stable-diffusion-3-medium", revision="refs/pr/26", token=HF_TOKEN)
57
+ repo_large_transformer_path = SD3Transformer2DModel.from_pretrained(repo_large_path, subfolder="transformer", torch_dtype=torch.float16)
58
+ repo_large_transformer = PeftModel.from_pretrained(repo_large_transformer_path, "jasperai/flash-sd3")
59
+
60
+ pipe.scheduler = FlashFlowMatchEulerDiscreteScheduler.from_pretrained(
61
+ repo_large_path,
62
+ subfolder="scheduler",
63
+ )
64
 
65
  repo_customs = {
66
+ "Default": None, #repo_default,
67
+ "Realistic": None, #StableDiffusionXLPipeline.from_pretrained("ehristoforu/Visionix-alpha", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
68
+ "Anime": None, #StableDiffusionXLPipeline.from_pretrained("cagliostrolab/animagine-xl-3.1", torch_dtype=torch.float16, use_safetensors=True, add_watermarker=False),
69
+ "Pixel": None, #repo_pixel,
70
+ "Large": StableDiffusion3Pipeline.from_pretrained(repo_large_path, transformer=repo_large_transformer, torch_dtype=torch.float16, use_safetensors=True,
71
+ text_encoder_3=None,
72
+ tokenizer_3=None),
73
  }
74
 
75
  # Functions
 
131
  parameters = {
132
  "prompt": input,
133
  "negative_prompt": filter_input + negative_input,
134
+ #"height": height,
135
+ #"width": width,
136
  "num_inference_steps": steps,
137
  "guidance_scale": guidance,
138
+ #"num_images_per_prompt": number,
139
  "generator": torch.Generator().manual_seed(seed),
140
+ #"output_type":"pil",
141
  }
142
 
143
  images = repo(**parameters).images