Update app.py
app.py CHANGED
@@ -113,7 +113,8 @@ sdulers =[
 ]

 def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
-
+    modal_id = ""+modal_id+""
+    dula=""+dula+"" ## shedulers todo
     pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(modal_id, use_safetensors=False, safety_checker=None,torch_dtype=torch.float32))
     pope = accelerator.prepare(pope.to("cpu"))
     pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(modal_id, use_safetensors=False,controlnet=controlnet, safety_checker=None,torch_dtype=torch.float32))
@@ -139,5 +140,5 @@ def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
     imoge = pipe(prompt,images,num_inference_steps=stips,negative_prompt=neg_prompt,controlnet_conditioning_scale=[blip, blop],generator=generator).images[0]
     return imoge

-iface = gr.Interface(fn=plex,inputs=[gr.Image(type="filepath"), gr.Textbox(label="prompt"), gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"), gr.Slider(label="infer_steps", value=20, minimum=1, step=1, maximum=100), gr.Dropdown(choices=models, type="value", label="select a model"), gr.Dropdown(choices=sdulers, value=sdulers[0], type="value", label="schedulrs"), gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.05, step=0.05, maximum=0.95), gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.05, step=0.05, maximum=0.95)], outputs=gr.Image(), title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo", description="by JoPmt.")
+iface = gr.Interface(fn=plex,inputs=[gr.Image(type="filepath"), gr.Textbox(label="prompt"), gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"), gr.Slider(label="infer_steps", value=20, minimum=1, step=1, maximum=100), gr.Dropdown(choices=models, value=models[0], type="value", label="select a model"), gr.Dropdown(choices=sdulers, value=sdulers[0], type="value", label="schedulrs"), gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.05, step=0.05, maximum=0.95), gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.05, step=0.05, maximum=0.95)], outputs=gr.Image(), title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo", description="by JoPmt.")
 iface.launch()
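Note on the "## shedulers todo" line added above: the commit stores the scheduler name chosen in the "schedulrs" dropdown in dula but does not yet apply it to the pipeline. A minimal sketch of one way that could be wired up is below, assuming the entries in sdulers are diffusers scheduler class names (e.g. "EulerDiscreteScheduler"); apply_scheduler is a hypothetical helper for illustration, not part of app.py.

# Hypothetical helper (not in app.py): swap the pipeline's scheduler based on
# the class name selected in the dropdown. Assumes `dula` is a scheduler class
# name exported by diffusers, e.g. "EulerDiscreteScheduler".
import diffusers

def apply_scheduler(pipe, dula):
    scheduler_cls = getattr(diffusers, dula)  # look up the scheduler class by name
    # Rebuild the scheduler from the pipeline's existing scheduler config so
    # timestep/beta settings stay consistent with the loaded model.
    pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
    return pipe

Called as pipe = apply_scheduler(pipe, dula) after the pipeline is created in plex, this would make the scheduler dropdown take effect; until something like it lands, the selection is only echoed into dula.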