JoPmt committed on
Commit
49d2711
·
1 Parent(s): 97098f6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -10
app.py CHANGED
@@ -49,7 +49,6 @@ models =[
49
  "wavymulder/modelshoot",
50
  "prompthero/openjourney-lora",
51
  "Fictiverse/Stable_Diffusion_VoxelArt_Model",
52
- "nousr/robo-diffusion-2-base",
53
  "darkstorm2150/Protogen_v2.2_Official_Release",
54
  "hassanblend/HassanBlend1.5.1.2",
55
  "hassanblend/hassanblend1.4",
@@ -71,7 +70,6 @@ models =[
71
  "lckidwell/album-cover-style",
72
  "axolotron/ice-cream-animals",
73
  "perion/ai-avatar",
74
- "FFusion/FFXL400",
75
  "digiplay/GhostMix",
76
  "ThePioneer/MISA",
77
  "TheLastBen/froggy-style-v21-768",
@@ -118,14 +116,14 @@ sdulers =[
118
  generator = torch.Generator(device="cpu").manual_seed(random.randint(0, MAX_SEED))
119
 
120
  def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
 
121
  modal_id = ""+modal_id+""
122
  dula=""+dula+"" ## shedulers todo
123
- pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(modal_id, use_safetensors=True,torch_dtype=torch.float32))
124
  pope.unet.to(memory_format=torch.channels_last)
125
  pope = accelerator.prepare(pope.to("cpu"))
126
- pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(modal_id, use_safetensors=True,controlnet=controlnet,torch_dtype=torch.float32))
127
- pipe.unet.to(memory_format=torch.channels_last)
128
- pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
129
  pipe = accelerator.prepare(pipe.to("cpu"))
130
 
131
  tilage = pope(prompt,num_inference_steps=5,height=512,width=512,generator=generator).images[0]
@@ -146,9 +144,13 @@ def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
146
  openpose_image = openpose(mput)
147
  openpose_image.save('./fin.png','PNG')
148
  images = [openpose_image, canny_image]
 
 
 
 
 
 
 
149
 
150
- image = pipe(prompt,images,num_inference_steps=stips,negative_prompt=neg_prompt,controlnet_conditioning_scale=[blip, blop],height=512,width=512,generator=generator).images[0]
151
- return image
152
-
153
- iface = gr.Interface(fn=plex,inputs=[gr.Image(type="pil"), gr.Textbox(label="prompt"), gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"), gr.Slider(label="infer_steps", value=20, minimum=1, step=1, maximum=100), gr.Dropdown(choices=models, value=models[0], type="value", label="select a model"), gr.Dropdown(choices=sdulers, value=sdulers[0], type="value", label="schedulrs"), gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.1, step=0.1, maximum=1), gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.1, step=0.1, maximum=1)], outputs=gr.Image(), title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo", description="by JoPmt.")
154
  iface.launch()
 
49
  "wavymulder/modelshoot",
50
  "prompthero/openjourney-lora",
51
  "Fictiverse/Stable_Diffusion_VoxelArt_Model",
 
52
  "darkstorm2150/Protogen_v2.2_Official_Release",
53
  "hassanblend/HassanBlend1.5.1.2",
54
  "hassanblend/hassanblend1.4",
 
70
  "lckidwell/album-cover-style",
71
  "axolotron/ice-cream-animals",
72
  "perion/ai-avatar",
 
73
  "digiplay/GhostMix",
74
  "ThePioneer/MISA",
75
  "TheLastBen/froggy-style-v21-768",
 
116
  generator = torch.Generator(device="cpu").manual_seed(random.randint(0, MAX_SEED))
117
 
118
  def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
119
+ apol = []
120
  modal_id = ""+modal_id+""
121
  dula=""+dula+"" ## shedulers todo
122
+ pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(modal_id, use_safetensors=True,torch_dtype=torch.float32, safety_checker=None))
123
  pope.unet.to(memory_format=torch.channels_last)
124
  pope = accelerator.prepare(pope.to("cpu"))
125
+ pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(modal_id, use_safetensors=True,controlnet=controlnet,torch_dtype=torch.float32,safety_checker=None))
126
+ pipe.scheduler = dula.from_config(pipe.scheduler.config)
 
127
  pipe = accelerator.prepare(pipe.to("cpu"))
128
 
129
  tilage = pope(prompt,num_inference_steps=5,height=512,width=512,generator=generator).images[0]
 
144
  openpose_image = openpose(mput)
145
  openpose_image.save('./fin.png','PNG')
146
  images = [openpose_image, canny_image]
147
+ apol.append(openpose_image)
148
+ apol.append(canny_image)
149
+ apol.append(tilage)
150
+ imoge = pipe([prompt] *2,images,num_inference_steps=stips,negative_prompt=[neg_prompt] *2,controlnet_conditioning_scale=[blip, blop],height=512,width=512,generator=generator).images
151
+ for i, imge in enumerate(imoge["images"]):
152
+ apol.append(imge)
153
+ return apol
154
 
155
+ iface = gr.Interface(fn=plex,inputs=[gr.Image(type="filepath"), gr.Textbox(label="prompt"), gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"), gr.Slider(label="infer_steps", value=5, minimum=1, step=1, maximum=5), gr.Dropdown(choices=models, value=models[0], type="value", label="select a model"), gr.Dropdown(choices=sdulers, value=sdulers[0], type="value", label="schedulrs"), gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.1, step=0.1, maximum=1), gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.1, step=0.1, maximum=1)], outputs=gr.Gallery(columns=2), title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo", description="by JoPmt.")
 
 
 
156
  iface.launch()