KingNish committed on
Commit 169bac6 · verified · 1 Parent(s): 135a7f4

Update app.py

Files changed (1)
  1. app.py +38 -4
app.py CHANGED
@@ -24,6 +24,29 @@ pipe_best.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="exampl
 pipe_best.set_adapters(["lora","dalle"], adapter_weights=[1.5, 0.7])
 pipe_best.to("cuda")
 
+pipe_ori = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe_ori.load_lora_weights("RalFinger/origami-style-sdxl-lora", weight_name="ral-orgmi-sdxl.safetensors", adapter_name="origami")
+pipe_ori.set_adapters(["origami"], adapter_weights=[2])
+pipe_ori.to("cuda")
+
+pipe_3D = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe_3D.load_lora_weights("artificialguybr/3DRedmond-V1", weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors", adapter_name="dalle2")
+pipe_3D.load_lora_weights("goofyai/3d_render_style_xl", weight_name="3d_render_style_xl.safetensors", adapter_name="dalle1")
+pipe_3D.set_adapters(["dalle2","dalle1"], adapter_weights=[1.1, 0.8])
+pipe_3D.to("cuda")
+
+pipe_pixel = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe_pixel.load_lora_weights("artificialguybr/PixelArtRedmond", weight_name="PixelArtRedmond-Lite64.safetensors", adapter_name="lora")
+pipe_pixel.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
+pipe_pixel.set_adapters(["lora", "pixel"], adapter_weights=[1.0, 1.2])
+pipe_pixel.to("cuda")
+
+pipe_logo = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
+pipe_logo.load_lora_weights("artificialguybr/StickersRedmond", weight_name="StickersRedmond.safetensors", adapter_name="lora")
+pipe_logo.load_lora_weights("artificialguybr/LogoRedmond-LogoLoraForSDXL", weight_name="LogoRedmond_LogoRedAF.safetensors", adapter_name="pixel")
+pipe_logo.set_adapters(["lora", "pixel"], adapter_weights=[0.5, 1.2])
+pipe_logo.to("cuda")
+
 help_text = """
 To optimize image results:
 - Adjust the **Image CFG weight** if the image isn't changing enough or is changing too much. Lower it to allow bigger changes, or raise it to preserve original details.
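Note on the block added above: each style gets its own StableDiffusionXLPipeline with one or two LoRA adapters activated via set_adapters, so five full SDXL pipelines end up resident on the GPU. As a rough illustration of a lighter alternative (not part of this commit), diffusers also lets a single pipeline hold several named adapters and switch the active set per request. A minimal sketch, assuming the same repo and vae objects defined earlier in app.py and reusing two of the LoRA repos loaded above; build_multi_style_pipe and activate_style are hypothetical helpers:

import torch
from diffusers import StableDiffusionXLPipeline

def build_multi_style_pipe(repo, vae):
    # One shared SDXL pipeline that carries several named LoRA adapters.
    pipe = StableDiffusionXLPipeline.from_pretrained(repo, torch_dtype=torch.float16, vae=vae)
    pipe.load_lora_weights("nerijs/pixel-art-xl", weight_name="pixel-art-xl.safetensors", adapter_name="pixel")
    pipe.load_lora_weights("RalFinger/origami-style-sdxl-lora", weight_name="ral-orgmi-sdxl.safetensors", adapter_name="origami")
    return pipe.to("cuda")

def activate_style(pipe, style):
    # Enable only the adapter(s) the requested style needs before generating.
    if style == "PixelArt":
        pipe.set_adapters(["pixel"], adapter_weights=[1.2])
    elif style == "Origami":
        pipe.set_adapters(["origami"], adapter_weights=[2.0])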
@@ -68,7 +91,7 @@ def king(type ,
         image_cfg_scale: float = 1.7,
         width: int = 1024,
         height: int = 1024,
-        guidance_scale: float = 6,
+        style="BEST",
         use_resolution_binning: bool = True,
         progress=gr.Progress(track_tqdm=True),
         ):
@@ -90,7 +113,16 @@ def king(type ,
     if randomize_seed:
         seed = random.randint(0, 99999)
     generator = torch.Generator().manual_seed(seed)
-    image = pipe_best( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    if style=="3D":
+        image = pipe_3D( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    elif style=="PixelArt":
+        image = pipe_pixel( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    elif style=="Logo":
+        image = pipe_logo( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    elif style=="Origami":
+        image = pipe_ori( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
+    else:
+        image = pipe_best( prompt = instruction, guidance_scale = 5, num_inference_steps = steps, width = width, height = height, generator = generator).images[0]
     return seed, image
 
 client = InferenceClient()
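The if/elif chain added in the hunk above works, but the same dispatch can be expressed as a lookup table, which keeps the five identical pipeline calls in one place and makes new styles a one-line addition. A minimal sketch, assuming the pipeline objects defined earlier (pipe_3D, pipe_pixel, pipe_logo, pipe_ori, pipe_best) and the same call arguments; pick_pipe is a hypothetical helper, not part of app.py:

def pick_pipe(style, pipes, default):
    # Table-driven equivalent of the if/elif chain: unknown styles and
    # "BEST" both fall back to the default pipeline.
    return pipes.get(style, default)

# Usage inside king():
#   pipe = pick_pipe(style, {"3D": pipe_3D, "PixelArt": pipe_pixel,
#                            "Logo": pipe_logo, "Origami": pipe_ori}, pipe_best)
#   image = pipe(prompt=instruction, guidance_scale=5, num_inference_steps=steps,
#                width=width, height=height, generator=generator).images[0]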
@@ -160,7 +192,8 @@ with gr.Blocks(css=css) as demo:
             type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True, info="AI will select option based on your query, but if it selects wrong, please choose correct one.")
         with gr.Column(scale=1):
             generate_button = gr.Button("Generate")
-
+    with gr.Row():
+        style = gr.Radio(choices=["BEST","3D", "PixelArt","Logo","Origami"], label="Style", value="BEST", interactive=True)
     with gr.Row():
         input_image = gr.Image(label="Image", type="pil", interactive=True)
 
@@ -207,7 +240,8 @@ with gr.Blocks(css=css) as demo:
             text_cfg_scale,
             image_cfg_scale,
             width,
-            height
+            height,
+            style
         ],
         outputs=[seed, input_image],
     )
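For readers unfamiliar with Gradio's wiring: the components listed in inputs are handed to the callback positionally, so appending style in the hunk above is what delivers the Radio's value to the new style parameter of king(). A stripped-down, self-contained sketch of that pattern (king_stub and the text components are placeholders, not the real app):

import gradio as gr

def king_stub(instruction, style="BEST"):
    # Placeholder callback: only demonstrates the argument order.
    return f"[{style}] {instruction}"

with gr.Blocks() as demo:
    instruction = gr.Textbox(label="Prompt")
    style = gr.Radio(choices=["BEST", "3D", "PixelArt", "Logo", "Origami"], value="BEST", label="Style")
    result = gr.Textbox(label="Result")
    # Each entry in `inputs` maps positionally onto king_stub's parameters.
    gr.Button("Generate").click(fn=king_stub, inputs=[instruction, style], outputs=[result])

demo.launch()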
 