Update app.py
app.py
CHANGED
@@ -103,13 +103,9 @@ def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str
 def load_and_prepare_model():
     #vae = AutoencoderKL.from_pretrained("ford442/sdxl-vae-bf16", safety_checker=None)
     vaeX = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None,use_safetensors=False)
-    #vae = AutoencoderKL.from_pretrained('cross-attention/asymmetric-autoencoder-kl-x-2',use_safetensors=False)
     #vae = AutoencoderKL.from_single_file('https://huggingface.co/ford442/sdxl-vae-bf16/mySLR/myslrVAE_v10.safetensors')
-    #vaeX = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse",use_safetensors=True)
     #vaeX = AutoencoderKL.from_pretrained('ford442/Juggernaut-XI-v11-fp32',subfolder='vae') # ,use_safetensors=True FAILS
-    #vaeX = AutoencoderKL.from_pretrained('ford442/RealVisXL_V5.0_FP64',subfolder='vae').to(torch.bfloat16) # ,use_safetensors=True FAILS
     #unetX = UNet2DConditionModel.from_pretrained('ford442/RealVisXL_V5.0_BF16',subfolder='unet').to(torch.bfloat16) # ,use_safetensors=True FAILS
-    # vae = AutoencoderKL.from_pretrained("BeastHF/MyBack_SDXL_Juggernaut_XL_VAE/MyBack_SDXL_Juggernaut_XL_VAE_V10(version_X).safetensors",safety_checker=None).to(torch.bfloat16)
     #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler',beta_schedule="scaled_linear", steps_offset=1,timestep_spacing="trailing"))
     #sched = EulerAncestralDiscreteScheduler.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='scheduler', steps_offset=1,timestep_spacing="trailing")
     sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
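Note: for context, a minimal sketch of how the VAE and scheduler kept by this hunk typically plug into an SDXL pipeline in diffusers. The checkpoint ids are the ones used in this file; the assembly itself is illustrative, not code from this commit.

    import torch
    from diffusers import AutoencoderKL, EulerAncestralDiscreteScheduler, StableDiffusionXLPipeline

    vae = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", use_safetensors=False)
    sched = EulerAncestralDiscreteScheduler.from_pretrained(
        "ford442/RealVisXL_V5.0_BF16", subfolder="scheduler",
        beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
        steps_offset=1, use_karras_sigmas=True,
    )
    # Attach both components when building the pipeline, then move to GPU in bf16
    pipe = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V5.0", vae=vae, scheduler=sched)
    pipe.to("cuda", torch.bfloat16)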
@@ -145,8 +141,7 @@ def load_and_prepare_model():
     #pipe.vae = AutoencoderKL.from_pretrained('stabilityai/sdxl-vae',subfolder='vae',force_upcast=False,scaling_factor= 0.182158767676)
     #pipe.vae.to(torch.bfloat16)
 
-
-
+
     '''
     scaling_factor (`float`, *optional*, defaults to 0.18215):
         The component-wise standard deviation of the trained latent space computed using the first batch of the
@@ -162,7 +157,6 @@ def load_and_prepare_model():
 
     '''
 
-    #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear",use_karras_sigmas=True, algorithm_type="dpmsolver++")
     #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained('SG161222/RealVisXL_V5.0', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
     pipe.vae = vaeX.to(torch.bfloat16)
@@ -171,18 +165,15 @@ def load_and_prepare_model():
     #pipe.vae.do_resize=False
     #pipe.vae.do_rescale=False
     #pipe.vae.do_convert_rgb=True
-    pipe.vae.vae_scale_factor=8
+    #pipe.vae.vae_scale_factor=8
 
     pipe.scheduler = sched
-    #pipe.scheduler =EDMDPMSolverMultistepScheduler.from_pretrained('John6666/uber-realistic-porn-merge-xl-urpmxl-v6final-sdxl', subfolder='scheduler', algorithm_type='sde-dpmsolver++')
     #pipe.vae=vae.to(torch.bfloat16)
     #pipe.unet=pipeX.unet
     #pipe.scheduler=EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
     #pipe.scheduler=EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
 
     pipe.to(device=device, dtype=torch.bfloat16)
-    #pipe.to(torch.bfloat16)
-    #apply_hidiffusion(pipe)
 
     #pipe.unet.set_default_attn_processor()
     pipe.vae.set_default_attn_processor()
@@ -196,10 +187,7 @@ def load_and_prepare_model():
     #print(f'UNET: {pipe.unet}')
     pipe.watermark=None
     pipe.safety_checker=None
-
-    #pipe.vae.to(torch.bfloat16)
-    #pipe.to(device, torch.bfloat16)
-    #del pipeX
+
 
     # pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/Fantasy_World_XL.safetensors", adapter_name="fantasy")
     pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/skin_texture_style_v4.safetensors", adapter_name="skin")
@@ -207,13 +195,9 @@ def load_and_prepare_model():
     #pipe.set_adapters(["skin", "photo", "fantasy"], adapter_weights=[0.75, 0.25, 0.5])
     pipe.set_adapters(["skin"], adapter_weights=[0.5])
 
-    #sched = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config, beta_schedule="scaled_linear", algorithm_type="dpmsolver++")
     #sched = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config, beta_schedule="linear", algorithm_type="dpmsolver++")
-    #sched = DDIMScheduler.from_config(pipe.scheduler.config)
     return pipe
 
-# Preload and compile both models
-#models = {key: load_and_prepare_model(value) for key, value in MODEL_OPTIONS.items()}
 pipe = load_and_prepare_model()
 MAX_SEED = np.iinfo(np.int32).max
 
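Note: the surviving LoRA lines use the diffusers multi-adapter API: each load_lora_weights call registers an adapter under a name, and set_adapters blends the named adapters. Only "skin" at weight 0.5 is active in this commit; the second adapter and the weights in this sketch are illustrative, lifted from the commented-out lines above.

    # pipe is the pipeline returned by load_and_prepare_model() above
    pipe.load_lora_weights("ford442/sdxl-vae-bf16",
                           weight_name="LoRA/Fantasy_World_XL.safetensors",
                           adapter_name="fantasy")
    # Blend both adapters; per-adapter weights control their relative strength
    pipe.set_adapters(["skin", "fantasy"], adapter_weights=[0.75, 0.5])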
@@ -237,9 +221,8 @@ def save_image(img):
     img.save(unique_name,optimize=False,compress_level=0)
     return unique_name
 
-def randomize_seed_fn(seed: int
-
-    seed = random.randint(0, MAX_SEED)
+def randomize_seed_fn(seed: int) -> int:
+    seed = random.randint(0, MAX_SEED)
     return seed
 
 def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise):
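Note: the repaired helper drops the old randomize flag entirely: every call draws a fresh seed, which is why the Seed slider and its output wiring are removed from the UI further down. A minimal self-contained sketch of the resulting behavior:

    import random
    import numpy as np
    import torch

    MAX_SEED = np.iinfo(np.int32).max  # 2147483647

    def randomize_seed_fn(seed: int) -> int:
        # the incoming value is ignored; a fresh seed is always drawn
        seed = random.randint(0, MAX_SEED)
        return seed

    seed = int(randomize_seed_fn(0))
    generator = torch.Generator(device="cpu").manual_seed(seed)  # the app uses device='cuda'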
@@ -273,9 +256,9 @@ def generate_30(
     height: int = 768,
     guidance_scale: float = 4,
     num_inference_steps: int = 125,
-    randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     denoise: float = 0.3,
+    vae_scale: int = 8,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
     #torch.backends.cudnn.benchmark = False
@@ -283,7 +266,8 @@ def generate_30(
     #gc.collect()
     #global models
     #pipe = models[model_choice]
-
+    pipe.vae.vae_scale_factor=vae_scale
+    seed = int(randomize_seed_fn(seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
@@ -307,7 +291,7 @@ def generate_30(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"rv50_A_{
+    sd_image_path = f"rv50_A_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     image_paths = save_image(rv_image)
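Note: each generate_* function now assigns pipe.vae.vae_scale_factor from the new vae_scale input. For reference, diffusers normally derives the pipeline's scale factor from the VAE config rather than reading an attribute set this way, so whether the per-call assignment changes decoding depends on what consumes it. The standard derivation, shown as an aside:

    # standard diffusers derivation of the spatial scale factor (8 for the SDXL VAE)
    vae_scale_factor = 2 ** (len(pipe.vae.config.block_out_channels) - 1)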
@@ -335,17 +319,18 @@ def generate_60(
     height: int = 768,
     guidance_scale: float = 4,
     num_inference_steps: int = 250,
-    randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     denoise: float = 0.3,
+    vae_scale: int = 8,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    #torch.backends.cudnn.benchmark =
+    #torch.backends.cudnn.benchmark = False
     #torch.cuda.empty_cache()
     #gc.collect()
     #global models
     #pipe = models[model_choice]
-
+    pipe.vae.vae_scale_factor=vae_scale
+    seed = int(randomize_seed_fn(seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
@@ -369,7 +354,7 @@ def generate_60(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"rv50_A_{
+    sd_image_path = f"rv50_A_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     image_paths = save_image(rv_image)
@@ -397,17 +382,18 @@ def generate_90(
     height: int = 768,
     guidance_scale: float = 4,
     num_inference_steps: int = 250,
-    randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     denoise: float = 0.3,
+    vae_scale: int = 8,
     progress=gr.Progress(track_tqdm=True) # Add progress as a keyword argument
 ):
-    #torch.backends.cudnn.benchmark =
+    #torch.backends.cudnn.benchmark = False
     #torch.cuda.empty_cache()
     #gc.collect()
     #global models
     #pipe = models[model_choice]
-
+    pipe.vae.vae_scale_factor=vae_scale
+    seed = int(randomize_seed_fn(seed))
     generator = torch.Generator(device='cuda').manual_seed(seed)
     #prompt, negative_prompt = apply_style(style_selection, prompt, negative_prompt)
     options = {
@@ -431,7 +417,7 @@ def generate_90(
     uploadNote(prompt,num_inference_steps,guidance_scale,timestamp,denoise)
     batch_options = options.copy()
     rv_image = pipe(**batch_options).images[0]
-    sd_image_path = f"rv50_A_{
+    sd_image_path = f"rv50_A_{timestamp}.png"
     rv_image.save(sd_image_path,optimize=False,compress_level=0)
     upload_to_ftp(sd_image_path)
     image_paths = save_image(rv_image)
@@ -516,13 +502,6 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             value="('deformed', 'distorted', 'disfigured':1.3),'not photorealistic':1.5, 'poorly drawn', 'bad anatomy', 'wrong anatomy', 'extra limb', 'missing limb', 'floating limbs', 'poorly drawn hands', 'poorly drawn feet', 'poorly drawn face':1.3, 'out of frame', 'extra limbs', 'bad anatomy', 'bad art', 'beginner', 'distorted face','amateur'",
             visible=True,
         )
-        seed = gr.Slider(
-            label="Seed",
-            minimum=0,
-            maximum=MAX_SEED,
-            step=1,
-            value=0,
-        )
         denoise = gr.Slider(
             label="Denoising Strength",
             minimum=0.0,
@@ -530,7 +509,13 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             step=0.01,
             value=0.3,
         )
-
+        vae_scale = gr.Slider(
+            label="VAE Scale Multiplier",
+            minimum=1,
+            maximum=16,
+            step=1,
+            value=8,
+        )
         with gr.Row():
             width = gr.Slider(
                 label="Width",
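Note: the new vae_scale slider only takes effect because it is also appended to each gr.on inputs list below, positionally matching the vae_scale parameter added to the generate_* signatures. A stripped-down sketch of that wiring, with a hypothetical stand-in for the generate functions:

    import gradio as gr

    def generate(prompt, denoise, vae_scale):
        # hypothetical stand-in for generate_30/60/90; a Gallery expects a list
        return []

    with gr.Blocks() as demo:
        prompt = gr.Text(label="Prompt")
        denoise = gr.Slider(label="Denoising Strength", minimum=0.0, maximum=1.0, step=0.01, value=0.3)
        vae_scale = gr.Slider(label="VAE Scale Multiplier", minimum=1, maximum=16, step=1, value=8)
        run = gr.Button("Run")
        result = gr.Gallery(label="Result")
        gr.on(
            triggers=[run.click, prompt.submit],
            fn=generate,
            inputs=[prompt, denoise, vae_scale],
            outputs=[result],
        )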
@@ -587,15 +572,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             negative_prompt,
             use_negative_prompt,
             style_selection,
-            seed,
             width,
             height,
             guidance_scale,
             num_inference_steps,
-
-
+            denoise,
+            vae_scale,
         ],
-        outputs=[result
+        outputs=[result],
     )
 
     gr.on(
@@ -610,15 +594,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             negative_prompt,
             use_negative_prompt,
             style_selection,
-            seed,
             width,
             height,
             guidance_scale,
             num_inference_steps,
-
-
+            denoise,
+            vae_scale,
         ],
-        outputs=[result
+        outputs=[result],
     )
 
     gr.on(
@@ -633,15 +616,14 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
             negative_prompt,
             use_negative_prompt,
             style_selection,
-            seed,
             width,
             height,
             guidance_scale,
             num_inference_steps,
-
-
+            denoise,
+            vae_scale,
         ],
-        outputs=[result
+        outputs=[result],
     )
 
     gr.Markdown("### REALVISXL V5.0")