JoPmt committed on
Commit
78cac1e
·
1 Parent(s): 085408b

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +151 -0
app.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
from PIL import Image
import cv2
import numpy as np
from transformers import pipeline
import PIL.Image
from diffusers.utils import load_image
from accelerate import Accelerator
from diffusers import StableDiffusionPipeline
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
from controlnet_aux import OpenposeDetector

# CPU-only Accelerator: this app pins everything to the CPU (no GPU assumed).
accelerator = Accelerator(cpu=True)
# Pose detector used to extract an OpenPose skeleton from the user's input image.
openpose = accelerator.prepare(OpenposeDetector.from_pretrained("lllyasviel/ControlNet"))

# Two ControlNets applied together, in this order: [openpose, canny].
# NOTE(review): this order must match the order of the conditioning images
# (and per-net scales) passed to the pipeline inside plex() below.
controlnet = [
accelerator.prepare(ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float32)),
accelerator.prepare(ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32)),
]
# Hub model ids offered in the "select a model" dropdown of the UI.
# The chosen id is loaded on demand by plex() for both the plain and the
# ControlNet Stable Diffusion pipelines.
models = [
    "runwayml/stable-diffusion-v1-5",
    "stablediffusionapi/disney-pixal-cartoon",
    "stablediffusionapi/edge-of-realism",
    "MirageML/fantasy-scene",
    "wavymulder/lomo-diffusion",
    "sd-dreambooth-library/fashion",
    "DucHaiten/DucHaitenDreamWorld",
    "VegaKH/Ultraskin",
    "kandinsky-community/kandinsky-2-1",
    "MirageML/lowpoly-cyberpunk",
    "thehive/everyjourney-sdxl-0.9-finetuned",
    "plasmo/woolitize-768sd1-5",
    "plasmo/food-crit",
    "johnslegers/epic-diffusion-v1.1",
    "Fictiverse/ElRisitas",
    "robotjung/SemiRealMix",
    "herpritts/FFXIV-Style",
    "prompthero/linkedin-diffusion",
    "RayHell/popupBook-diffusion",
    "MirageML/lowpoly-world",
    "warp-ai/wuerstchen",
    "deadman44/SD_Photoreal_Merged_Models",
    "Conflictx/CGI_Animation",
    "johnslegers/epic-diffusion",
    "tilake/China-Chic-illustration",
    "wavymulder/modelshoot",
    "prompthero/openjourney-lora",
    "Fictiverse/Stable_Diffusion_VoxelArt_Model",
    "nousr/robo-diffusion-2-base",
    "darkstorm2150/Protogen_v2.2_Official_Release",
    "hassanblend/HassanBlend1.5.1.2",
    "hassanblend/hassanblend1.4",
    "nitrosocke/redshift-diffusion",
    "prompthero/openjourney-v2",
    "nitrosocke/Arcane-Diffusion",
    "Lykon/DreamShaper",
    "wavymulder/Analog-Diffusion",
    "nitrosocke/mo-di-diffusion",
    "dreamlike-art/dreamlike-diffusion-1.0",
    "dreamlike-art/dreamlike-photoreal-2.0",
    "digiplay/RealismEngine_v1",
    "digiplay/AIGEN_v1.4_diffusers",
    "stablediffusionapi/dreamshaper-v6",
    "JackAnon/GorynichMix",
    "p1atdev/liminal-space-diffusion",
    "nadanainone/gigaschizonegs",
    "darkVOYAGE/dvMJv4",
    "lckidwell/album-cover-style",
    "axolotron/ice-cream-animals",
    "perion/ai-avatar",
    "FFusion/FFXL400",
    "digiplay/GhostMix",
    "ThePioneer/MISA",
    "TheLastBen/froggy-style-v21-768",
    "FloydianSound/Nixeu_Diffusion_v1-5",
    "diffusers/sdxl-instructpix2pix-768",
    "kakaobrain/karlo-v1-alpha-image-variations",
    "coreml-community/coreml-HassanBlend",
    "digiplay/PotoPhotoRealism_v1",
    "ConsistentFactor/Aurora-By_Consistent_Factor",
    "coreml/coreml-ghostmix-v11",
    "rim0/quadruped_mechas",
    "Akumetsu971/SD_Samurai_Anime_Model",
    "Bojaxxx/Fantastic-Mr-Fox-Diffusion",
    "sd-dreambooth-library/original-character-cyclps",
    "AIArtsChannel/steampunk-diffusion",
]
# Scheduler class names offered in the UI dropdown. Each entry is the name of
# a scheduler class exported by the `diffusers` package; plex() resolves the
# selected name with getattr(diffusers, name) and falls back to UniPC.
# Fix: the original list contained "DPMSolverMultistepInverse", which is not a
# diffusers class — the real class is DPMSolverMultistepInverseScheduler.
sdulers = [
    "UniPCMultistepScheduler",
    "DDIMScheduler",
    "DDPMScheduler",
    "DDIMInverseScheduler",
    "CMStochasticIterativeScheduler",
    "DEISMultistepScheduler",
    "DPMSolverMultistepInverseScheduler",
    "DPMSolverMultistepScheduler",
    "DPMSolverSDEScheduler",
    "DPMSolverSinglestepScheduler",
    "EulerAncestralDiscreteScheduler",
    "EulerDiscreteScheduler",
    "HeunDiscreteScheduler",
    "IPNDMScheduler",
    "KarrasVeScheduler",
    "KDPM2AncestralDiscreteScheduler",
    "KDPM2DiscreteScheduler",
    "LMSDiscreteScheduler",
    "PNDMScheduler",
    "RePaintScheduler",
    "ScoreSdeVeScheduler",
    "ScoreSdeVpScheduler",
    "VQDiffusionScheduler",
]
def plex(mput, prompt, neg_prompt, stips, modal_id, dula, blip, blop):
    """Generate an image conditioned on an OpenPose skeleton and Canny edges.

    A plain SD pipeline first synthesizes a guide image from `prompt`; Canny
    edges are extracted from that guide, an OpenPose skeleton is extracted
    from the user image `mput`, and both feed a two-ControlNet SD pipeline.

    Args:
        mput: path to the user's pose reference image (Gradio filepath).
        prompt: positive text prompt.
        neg_prompt: negative text prompt.
        stips: number of inference steps for the ControlNet pass.
        modal_id: Hub model id selected from the `models` dropdown.
        dula: scheduler class name selected from the `sdulers` dropdown.
        blip: conditioning scale for the canny ControlNet (UI label).
        blop: conditioning scale for the pose ControlNet (UI label).

    Returns:
        The generated PIL image.
    """
    import diffusers  # local import: resolve the scheduler class chosen in the UI

    # Plain SD pipeline, used only to synthesize the edge-guide image.
    pope = accelerator.prepare(StableDiffusionPipeline.from_pretrained(
        modal_id, use_safetensors=False, safety_checker=None, torch_dtype=torch.float32))
    pope.to("cpu")
    # ControlNet pipeline driven by the module-level [openpose, canny] pair.
    pipe = accelerator.prepare(StableDiffusionControlNetPipeline.from_pretrained(
        modal_id, use_safetensors=False, controlnet=controlnet,
        safety_checker=None, torch_dtype=torch.float32))
    pipe.to("cpu")

    # Fix: honor the scheduler picked in the dropdown (it was previously
    # ignored — `dula=dula ## shedulers todo`). Unknown or empty selections
    # fall back to UniPC, preserving the original behavior.
    sched_cls = getattr(diffusers, dula, None) if dula else None
    if sched_cls is None:
        sched_cls = UniPCMultistepScheduler
    pipe.scheduler = sched_cls.from_config(pipe.scheduler.config)

    # Fixed seed so repeated runs with identical inputs reproduce.
    generator = torch.Generator(device="cpu").manual_seed(16384)

    # Quick low-step render of the prompt, used only as the Canny source.
    tilage = pope(prompt, num_inference_steps=5).images[0]
    cannyimage = np.array(tilage)
    cannyimage = cv2.Canny(cannyimage, 100, 200)
    # Expand single-channel edges to a 3-channel image for the pipeline.
    cannyimage = cannyimage[:, :, None]
    cannyimage = np.concatenate([cannyimage, cannyimage, cannyimage], axis=2)
    canny_image = Image.fromarray(cannyimage)

    # OpenPose skeleton from the user-supplied reference image.
    pose_image = load_image(mput)
    openpose_image = openpose(pose_image)

    # Order must match the module-level `controlnet` list: [openpose, canny].
    images = [openpose_image, canny_image]

    imoge = pipe(
        prompt,
        images,
        num_inference_steps=stips,
        generator=generator,
        negative_prompt=neg_prompt,
        # Fix: scales are positional per ControlNet. Index 0 is openpose and
        # index 1 is canny, so the pose scale (blop) must come first; the
        # original [blip, blop] applied the canny slider to the pose net.
        controlnet_conditioning_scale=[blop, blip],
    ).images[0]

    return imoge
# Wire the UI: input order must match plex()'s parameter order
# (image, prompt, neg_prompt, steps, model, scheduler, canny scale, pose scale).
iface = gr.Interface(
    fn=plex,
    inputs=[
        gr.Image(type="filepath"),
        gr.Textbox(label="prompt"),
        gr.Textbox(label="neg_prompt", value="monochrome, lowres, bad anatomy, worst quality, low quality"),
        gr.Slider(label="infer_steps", value=20, minimum=1, step=1, maximum=100),
        gr.Dropdown(choices=models, type="value", label="select a model"),
        gr.Dropdown(choices=sdulers, type="value", label="schedulrs"),
        gr.Slider(label="condition_scale_canny", value=0.5, minimum=0.05, step=0.05, maximum=0.95),
        gr.Slider(label="condition_scale_pose", value=0.5, minimum=0.05, step=0.05, maximum=0.95),
    ],
    outputs=gr.Image(),
    title="Img2Img Guided Multi-Conditioned Canny/Pose Controlnet Selectable StableDiffusion Model Demo",
    description="by JoPmt.",
)
iface.launch()