tombetthauser committed
Commit 6cf80c2 · 1 Parent(s): e9aa514

Update app.py

Try importing sample code

Files changed (1)
  1. app.py +341 -322
app.py CHANGED
@@ -1,325 +1,344 @@
-
-
- #@title 1. General Setup
-
- # FOR DEPLOYMENT:
- # !pip install -qq diffusers==0.11.1 transformers ftfy accelerate
- # !pip install -Uq diffusers transformers
- # !pip install -Uq gradio
- # !pip install -Uq accelerate
-
- from diffusers import StableDiffusionPipeline
- pipeline = StableDiffusionPipeline
-
- from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
- from accelerate import init_empty_weights
- import gradio
- import torch
  import os
-
- # UNDER CONSTRUCTION ---{{{
- import subprocess
- # }}}---
-
- # FOR DEPLOYMENT: uncomment these and delete the notebook_login() below
- api_key = os.environ['api_key']
- my_token = api_key
-
- # from huggingface_hub import notebook_login
- # notebook_login()
-
- # NOT NEEDED FOR DEPLOYMENT ---{{{
- # import PIL
- # from PIL import Image
-
- # def image_grid(imgs, rows, cols):
- #     assert len(imgs) == rows*cols
-
- #     w, h = imgs[0].size
- #     grid = Image.new('RGB', size=(cols*w, rows*h))
- #     grid_w, grid_h = grid.size
-
- #     for i, img in enumerate(imgs):
- #         grid.paste(img, box=(i%cols*w, i//cols*h))
- #     return grid
- # }}}---
-
- pretrained_model_name_or_path = "stabilityai/stable-diffusion-2"
-
- # from IPython.display import Markdown
- from huggingface_hub import hf_hub_download
-
-
- #@title 2. Tell it What Concepts to Load
-
- models_to_load = [
-     "ahx-model-3",
-     "ahx-model-5",
-     "ahx-model-6",
-     "ahx-model-7",
-     "ahx-model-8",
-     "ahx-model-9",
-     "ahx-model-10",
-     "ahx-model-11",
- ]
-
- models_to_load = [f"sd-concepts-library/{model}" for model in models_to_load]
- completed_concept_pipes = {}
-
-
- #@title 3. Load the Concepts as Distinct Pipes
-
- for repo_id_embeds in models_to_load:
-     print(f"loading {repo_id_embeds}")
-     print("----------------------")
-     # repo_id_embeds = "sd-concepts-library/ahx-model-3"
-
-     embeds_url = ""  # Add the URL or path to a learned_embeds.bin file in case you have one
-     placeholder_token_string = ""  # Add the token string in case you are uploading your own embed
-
-     downloaded_embedding_folder = "./downloaded_embedding"
-     if not os.path.exists(downloaded_embedding_folder):
-         os.mkdir(downloaded_embedding_folder)
-     if not embeds_url:
-         embeds_path = hf_hub_download(repo_id=repo_id_embeds, filename="learned_embeds.bin")
-         token_path = hf_hub_download(repo_id=repo_id_embeds, filename="token_identifier.txt")
-         # FOR DEPLOYMENT: address file system use
-         #!cp downloaded_embedding_folder
-         #!cp downloaded_embedding_folder
-
-         # UNDER CONSTRUCTION ---{{{
-         # pass the program and each argument as separate list items;
-         # a single f-string would be treated as one (nonexistent) program name
-         subprocess.call(["cp", embeds_path, downloaded_embedding_folder])
-         subprocess.call(["cp", token_path, downloaded_embedding_folder])
-         # }}}---
-
-         with open(f'{downloaded_embedding_folder}/token_identifier.txt', 'r') as file:
-             placeholder_token_string = file.read()
-     else:
-         # FOR DEPLOYMENT: address file system use
-         #!wget -q -O $downloaded_embedding_folder/learned_embeds.bin $embeds_url
-
-         # UNDER CONSTRUCTION ---{{{
-         subprocess.call(["wget", "-q", "-O", f"{downloaded_embedding_folder}/learned_embeds.bin", embeds_url])
-         # }}}---
-
-     learned_embeds_path = f"{downloaded_embedding_folder}/learned_embeds.bin"
-
-     # ----
-
-     tokenizer = CLIPTokenizer.from_pretrained(
-         pretrained_model_name_or_path,
-         subfolder="tokenizer",
      )
-     text_encoder = CLIPTextModel.from_pretrained(
-         pretrained_model_name_or_path, subfolder="text_encoder", torch_dtype=torch.float16
      )
-
-     # ----
-
-     def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
-         loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
-
-         # separate token and the embeds
-         trained_token = list(loaded_learned_embeds.keys())[0]
-         embeds = loaded_learned_embeds[trained_token]
-
-         # cast to dtype of text_encoder (.to() is not in-place, so assign the result)
-         dtype = text_encoder.get_input_embeddings().weight.dtype
-         embeds = embeds.to(dtype)
-
-         # add the token in tokenizer
-         token = token if token is not None else trained_token
-         num_added_tokens = tokenizer.add_tokens(token)
-         if num_added_tokens == 0:
-             raise ValueError(f"The tokenizer already contains the token {token}. Please pass a different `token` that is not already in the tokenizer.")
-
-         # resize the token embeddings
-         text_encoder.resize_token_embeddings(len(tokenizer))
-
-         # get the id for the token and assign the embeds
-         token_id = tokenizer.convert_tokens_to_ids(token)
-         text_encoder.get_input_embeddings().weight.data[token_id] = embeds
-
-     load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer)
-
-     # FOR DEPLOYMENT: add use_auth_token=my_token to pipe keyword args
-     # ie --> pipe = pipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16, use_auth_token=my_token).to("cuda")
-     pipe = StableDiffusionPipeline.from_pretrained(
-         pretrained_model_name_or_path,
-         torch_dtype=torch.float16,
-         text_encoder=text_encoder,
-         tokenizer=tokenizer,
-         use_auth_token=my_token,
-     ).to("cuda")
-
-     completed_concept_pipes[repo_id_embeds] = pipe
-     print("--> complete!")
-     print("----------------------")
-
-
-
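The UNDER CONSTRUCTION blocks above shell out to cp and wget. A stdlib-only sketch of the same two steps, assuming the same embeds_path, token_path, embeds_url, and downloaded_embedding_folder variables from the loop above, would avoid the external binaries entirely:

    import shutil
    import urllib.request

    # copy the downloaded embedding and token files into the working folder
    shutil.copy(embeds_path, downloaded_embedding_folder)
    shutil.copy(token_path, downloaded_embedding_folder)

    # fetch a user-supplied embedding URL without shelling out to wget
    urllib.request.urlretrieve(embeds_url, f"{downloaded_embedding_folder}/learned_embeds.bin")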
-
- #@title 4. Print Available Concept Strings
-
- # NOT NEEDED FOR DEPLOYMENT ---{{{
- # print("AVAILABLE CONCEPTS TO SELECT FROM")
- # print("copy one and paste below under 'model'")
- # print("------------------------------------------------------")
- # # list(completed_concept_pipes)
- # for model in completed_concept_pipes:
- #     print(f"{model}")
- # }}}---
-
- #@title 5. Optionally Test without Gradio
-
- # NOT NEEDED FOR DEPLOYMENT ---{{{
- # model = ""  #@param {type: "string"}
- # prompt = ""  #@param {type:"string"}
-
- # if prompt and model:
- #     if model not in completed_concept_pipes:
- #         raise ValueError("Invalid Model Name")
-
- #     model_token = model.split("/")[1]
- #     prompt = f"{prompt} in the style of <{model_token}>"
-
- #     if model == "sd-concepts-library/ahx-model-5":
- #         prompt = f"{prompt} in the style of "
-
- #     num_samples = 1
- #     num_rows = 1
-
- #     all_images = []
- #     pipe = completed_concept_pipes[model]
-
- #     for _ in range(num_rows):
- #         images = pipe(prompt, num_images_per_prompt=num_samples, height=512, width=512, num_inference_steps=30, guidance_scale=7.5).images
- #         all_images.extend(images)
-
- #     grid = image_grid(all_images, num_samples, num_rows)
- #     grid
- # }}}---
-
-
- #@title 6. Define Custom CSS for Gradio
-
- use_custom_css = True
-
- gradio_css = """
- #output-image {
-     border: 1px solid black;
-     background-color: white;
-     width: 500px;
-     display: block;
-     margin-left: auto;
-     margin-right: auto;
- }
- """
-
- gradio_css_alternative = """
- #go-button {
-     background-color: white;
-     border-radius: 0;
-     border: none;
-     font-family: serif;
-     background-image: none;
-     font-weight: 100;
-     width: fit-content;
-     display: block;
-     margin-left: auto;
-     margin-right: auto;
-     text-decoration: underline;
-     box-shadow: none;
-     color: blue;
- }
- .rounded-lg {
-     border: none;
- }
- .gr-box {
-     border-radius: 0;
-     border: 1px solid black;
- }
- .text-gray-500 {
-     color: black;
-     font-family: serif;
-     font-size: 15px;
- }
- .border-gray-200 {
-     border: 1px solid black;
- }
- .bg-gray-200 {
-     background-color: white;
-     --tw-bg-opacity: 0;
- }
- footer {
-     display: none;
- }
- footer {
-     opacity: 0;
- }
- #output-image {
-     border: 1px solid black;
-     background-color: white;
-     width: 500px;
-     display: block;
-     margin-left: auto;
-     margin-right: auto;
- }
- .absolute {
-     display: none;
- }
- #input-text {
-     width: 500px;
-     display: block;
-     margin-left: auto;
-     margin-right: auto;
-     padding: 0 0 0 0;
- }
- .py-6 {
-     padding-top: 0;
-     padding-bottom: 0;
- }
- .px-4 {
-     padding-left: 0;
-     padding-right: 0;
- }
- .rounded-lg {
-     border-radius: 0;
- }
- .gr-padded {
-     padding: 0 0;
-     margin-bottom: 12.5px;
- }
- .col > *, .col > .gr-form > * {
-     width: 500px;
-     margin-left: auto;
-     margin-right: auto;
- }
- """
-
-
- #@title 7. Build and Launch the Gradio Interface
-
- DROPDOWNS = {}
-
- for model in models_to_load:
-     token = model.split("/")[1]
-     DROPDOWNS[model] = f" in the style of <{token}>"
-
- if "sd-concepts-library/ahx-model-5" in DROPDOWNS:
-     DROPDOWNS["sd-concepts-library/ahx-model-5"] = " in the style of "
-
- def image_prompt(prompt, dropdown):
-     prompt = prompt + DROPDOWNS[dropdown]
-     pipe = completed_concept_pipes[dropdown]
-     return pipe(prompt=prompt, height=512, width=512).images[0]
-
- with gradio.Blocks(css=gradio_css if use_custom_css else "") as demo:
-     dropdown = gradio.Dropdown(list(DROPDOWNS), label="choose style...")
-     prompt = gradio.Textbox(label="image prompt...", elem_id="input-text")
-     output = gradio.Image(elem_id="output-image")
-     go_button = gradio.Button("draw it!", elem_id="go-button")
-     go_button.click(fn=image_prompt, inputs=[prompt, dropdown], outputs=output)
-
- demo.launch(share=True)
-
 
+ #@title Prepare the Concepts Library to be used
+ import requests
  import os
+ import gradio as gr
+ import wget
+ import torch
+ from torch import autocast
+ from diffusers import StableDiffusionPipeline
+ from huggingface_hub import HfApi
+ from transformers import CLIPTextModel, CLIPTokenizer
+ import html
+
+ from share_btn import community_icon_html, loading_icon_html, share_js
+
+ api = HfApi()
+ models_list = api.list_models(author="sd-concepts-library", sort="likes", direction=-1)
+ models = []
+
+ pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", use_auth_token=True, revision="fp16", torch_dtype=torch.float16).to("cuda")
+
+ def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, token=None):
+     loaded_learned_embeds = torch.load(learned_embeds_path, map_location="cpu")
+
+     # separate token and the embeds
+     trained_token = list(loaded_learned_embeds.keys())[0]
+     embeds = loaded_learned_embeds[trained_token]
+
+     # cast to dtype of text_encoder (assign the result; .to() is not in-place)
+     dtype = text_encoder.get_input_embeddings().weight.dtype
+     embeds = embeds.to(dtype)
+
+     # add the token in tokenizer; if it is taken, append -1, -2, ... until one is free
+     token = token if token is not None else trained_token
+     num_added_tokens = tokenizer.add_tokens(token)
+     i = 1
+     while num_added_tokens == 0:
+         print(f"The tokenizer already contains the token {token}.")
+         token = f"{token[:-1]}-{i}>"
+         print(f"Attempting to add the token {token}.")
+         num_added_tokens = tokenizer.add_tokens(token)
+         i += 1
+
+     # resize the token embeddings
+     text_encoder.resize_token_embeddings(len(tokenizer))
+
+     # get the id for the token and assign the embeds
+     token_id = tokenizer.convert_tokens_to_ids(token)
+     text_encoder.get_input_embeddings().weight.data[token_id] = embeds
+     return token
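The resize-and-assign step above is the core of loading a textual-inversion concept: the tokenizer grows by one token and the learned vector becomes that token's embedding row. A self-contained sketch of the same pattern on a toy embedding table (sizes are illustrative assumptions, no CLIP weights involved):

    import torch
    import torch.nn as nn

    vocab_size, dim = 10, 4
    table = nn.Embedding(vocab_size, dim)    # stands in for text_encoder.get_input_embeddings()
    learned_embed = torch.randn(dim)         # stands in for the tensor stored in learned_embeds.bin

    # grow the table by one row, keeping existing weights (what resize_token_embeddings does)
    grown = nn.Embedding(vocab_size + 1, dim)
    grown.weight.data[:vocab_size] = table.weight.data

    # write the learned vector into the new token's row
    new_token_id = vocab_size                # id the tokenizer would return for the new token
    grown.weight.data[new_token_id] = learned_embed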
+
+ print("Setting up the public library")
+ for model in models_list:
+     model_content = {}
+     model_id = model.modelId
+     model_content["id"] = model_id
+     embeds_url = f"https://huggingface.co/{model_id}/resolve/main/learned_embeds.bin"
+     os.makedirs(model_id, exist_ok=True)
+     if not os.path.exists(f"{model_id}/learned_embeds.bin"):
+         try:
+             wget.download(embeds_url, out=model_id)
+         except:
+             continue
+     token_identifier = f"https://huggingface.co/{model_id}/raw/main/token_identifier.txt"
+     response = requests.get(token_identifier)
+     token_name = response.text
+
+     concept_type = f"https://huggingface.co/{model_id}/raw/main/type_of_concept.txt"
+     response = requests.get(concept_type)
+     concept_name = response.text
+     model_content["concept_type"] = concept_name
+     images = []
+     for i in range(4):
+         url = f"https://huggingface.co/{model_id}/resolve/main/concept_images/{i}.jpeg"
+         image_download = requests.get(url)
+         url_code = image_download.status_code
+         if url_code == 200:
+             file = open(f"{model_id}/{i}.jpeg", "wb")  # create the file for the image
+             file.write(image_download.content)  # save the file content
+             file.close()
+             images.append(f"{model_id}/{i}.jpeg")
+     model_content["images"] = images
+     # if the token cannot be loaded, skip it
+     try:
+         learned_token = load_learned_embed_in_clip(f"{model_id}/learned_embeds.bin", pipe.text_encoder, pipe.tokenizer, token_name)
+     except:
+         continue
+     model_content["token"] = learned_token
+     models.append(model_content)
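For reference, each entry appended to models above ends up shaped roughly like this (a hypothetical example; actual values depend on what the Hub returns for each repo):

    example_entry = {
        "id": "sd-concepts-library/cat-toy",
        "concept_type": "object",
        "images": ["sd-concepts-library/cat-toy/0.jpeg", "sd-concepts-library/cat-toy/1.jpeg"],
        "token": "<cat-toy>",
    }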
+
+ #@title Run the app to navigate around [the Library](https://huggingface.co/sd-concepts-library)
+ #@markdown Click the `Running on public URL:` result to run the Gradio app
+
+ SELECT_LABEL = "Select concept"
+ def assembleHTML(model):
+     html_gallery = ''
+     html_gallery = html_gallery + '''
+     <div class="flex gr-gap gr-form-gap row gap-4 w-full flex-wrap" id="main_row">
+     '''
+     cap = 0
+     for model in models:
+         html_gallery = html_gallery + f'''
+         <div class="gr-block gr-box relative w-full overflow-hidden border-solid border border-gray-200 gr-panel">
+             <div class="output-markdown gr-prose" style="max-width: 100%;">
+                 <h3>
+                     <a href="https://huggingface.co/{model["id"]}" target="_blank">
+                         <code>{html.escape(model["token"])}</code>
+                     </a>
+                 </h3>
+             </div>
+             <div id="gallery" class="gr-block gr-box relative w-full overflow-hidden border-solid border border-gray-200">
+                 <div class="wrap svelte-17ttdjv opacity-0"></div>
+                 <div class="absolute left-0 top-0 py-1 px-2 rounded-br-lg shadow-sm text-xs text-gray-500 flex items-center pointer-events-none bg-white z-20 border-b border-r border-gray-100 dark:bg-gray-900">
+                     <span class="mr-2 h-[12px] w-[12px] opacity-80">
+                         <svg xmlns="http://www.w3.org/2000/svg" width="100%" height="100%" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round" class="feather feather-image">
+                             <rect x="3" y="3" width="18" height="18" rx="2" ry="2"></rect>
+                             <circle cx="8.5" cy="8.5" r="1.5"></circle>
+                             <polyline points="21 15 16 10 5 21"></polyline>
+                         </svg>
+                     </span> {model["concept_type"]}
+                 </div>
+                 <div class="overflow-y-auto h-full p-2" style="position: relative;">
+                     <div class="grid gap-2 grid-cols-2 sm:grid-cols-2 md:grid-cols-2 lg:grid-cols-2 xl:grid-cols-2 2xl:grid-cols-2 svelte-1g9btlg pt-6">
+         '''
+         for image in model["images"]:
+             html_gallery = html_gallery + f'''
+                         <button class="gallery-item svelte-1g9btlg">
+                             <img alt="" loading="lazy" class="h-full w-full overflow-hidden object-contain" src="file/{image}">
+                         </button>
+             '''
+         html_gallery = html_gallery + '''
+                     </div>
+                     <iframe style="display: block; position: absolute; top: 0; left: 0; width: 100%; height: 100%; overflow: hidden; border: 0; opacity: 0; pointer-events: none; z-index: -1;" aria-hidden="true" tabindex="-1" src="about:blank"></iframe>
+                 </div>
+             </div>
+         </div>
+         '''
+         cap += 1
+         if cap == 99:
+             break
+     html_gallery = html_gallery + '''
+     </div>
+     '''
+     return html_gallery
+
+ def title_block(title, id):
+     return gr.Markdown(f"### [`{title}`](https://huggingface.co/{id})")
+
+ def image_block(image_list, concept_type):
+     return gr.Gallery(
+         label=concept_type, value=image_list, elem_id="gallery"
+     ).style(grid=[2], height="auto")
+
+ def checkbox_block():
+     checkbox = gr.Checkbox(label=SELECT_LABEL).style(container=False)
+     return checkbox
+
+ def infer(text):
+     #with autocast("cuda"):
+     images_list = pipe(
+         [text]*2,
+         num_inference_steps=50,
+         guidance_scale=7.5
      )
+     #output_images = []
+     #for i, image in enumerate(images_list.images):
+     #    output_images.append(image)
+     return images_list.images, gr.update(visible=True), gr.update(visible=True), gr.update(visible=True)
+
+ # identical to `infer`, but without the gradio state updates for the share button
+ def infer_examples(text):
+     #with autocast("cuda"):
+     images_list = pipe(
+         [text]*2,
+         num_inference_steps=50,
+         guidance_scale=7.5
      )
+     #output_images = []
+     #for i, image in enumerate(images_list["sample"]):
+     #    output_images.append(image)
+     return images_list.images
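Both functions batch the same prompt twice to get two samples per click. A sketch of an equivalent call using the pipeline's num_images_per_prompt keyword, the same parameter the removed code above passes as num_samples:

    # equivalent to pipe([text]*2, ...): two images for one prompt
    images = pipe(text, num_images_per_prompt=2, num_inference_steps=50, guidance_scale=7.5).images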
+
+ css = '''
+ .gradio-container {font-family: 'IBM Plex Sans', sans-serif}
+ #top_title{margin-bottom: .5em}
+ #top_title h2{margin-bottom: 0; text-align: center}
+ /*#main_row{flex-wrap: wrap; gap: 1em; max-height: 550px; overflow-y: scroll; flex-direction: row}*/
+ #component-3{height: 760px; overflow: auto}
+ #component-9{position: sticky; top: 0; align-self: flex-start}
+ @media (min-width: 768px){#main_row > div{flex: 1 1 32%; margin-left: 0 !important}}
+ .gr-prose code::before, .gr-prose code::after {content: "" !important}
+ ::-webkit-scrollbar {width: 10px}
+ ::-webkit-scrollbar-track {background: #f1f1f1}
+ ::-webkit-scrollbar-thumb {background: #888}
+ ::-webkit-scrollbar-thumb:hover {background: #555}
+ .gr-button {white-space: nowrap}
+ .gr-button:focus {
+     border-color: rgb(147 197 253 / var(--tw-border-opacity));
+     outline: none;
+     box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
+     --tw-border-opacity: 1;
+     --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
+     --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
+     --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
+     --tw-ring-opacity: .5;
+ }
+ #prompt_input{flex: 1 3 auto; width: auto !important;}
+ #prompt_area{margin-bottom: .75em}
+ #prompt_area > div:first-child{flex: 1 3 auto}
+ .animate-spin {
+     animation: spin 1s linear infinite;
+ }
+ @keyframes spin {
+     from {
+         transform: rotate(0deg);
+     }
+     to {
+         transform: rotate(360deg);
+     }
+ }
+ #share-btn-container {
+     display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
+ }
+ #share-btn {
+     all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
+ }
+ #share-btn * {
+     all: unset;
+ }
+ '''
+
+ examples = ["a <cat-toy> in <madhubani-art> style", "a <line-art> style mecha robot", "a piano being played by <bonzi>", "Candid photo of <cheburashka>, high resolution photo, trending on artstation, interior design"]
+
+ with gr.Blocks(css=css) as demo:
+     state = gr.Variable({
+         'selected': -1
+     })
+     state = {}
+     def update_state(i):
+         global checkbox_states
+         if checkbox_states[i]:
+             checkbox_states[i] = False
+             state[i] = False
+         else:
+             state[i] = True
+             checkbox_states[i] = True
+     gr.HTML('''
+     <div style="text-align: center; max-width: 720px; margin: 0 auto;">
+         <div
+             style="
+                 display: inline-flex;
+                 align-items: center;
+                 gap: 0.8rem;
+                 font-size: 1.75rem;
+             "
+         >
+             <svg
+                 width="0.65em"
+                 height="0.65em"
+                 viewBox="0 0 115 115"
+                 fill="none"
+                 xmlns="http://www.w3.org/2000/svg"
+             >
+                 <rect width="23" height="23" fill="white"></rect>
+                 <rect y="69" width="23" height="23" fill="white"></rect>
+                 <rect x="23" width="23" height="23" fill="#AEAEAE"></rect>
+                 <rect x="23" y="69" width="23" height="23" fill="#AEAEAE"></rect>
+                 <rect x="46" width="23" height="23" fill="white"></rect>
+                 <rect x="46" y="69" width="23" height="23" fill="white"></rect>
+                 <rect x="69" width="23" height="23" fill="black"></rect>
+                 <rect x="69" y="69" width="23" height="23" fill="black"></rect>
+                 <rect x="92" width="23" height="23" fill="#D9D9D9"></rect>
+                 <rect x="92" y="69" width="23" height="23" fill="#AEAEAE"></rect>
+                 <rect x="115" y="46" width="23" height="23" fill="white"></rect>
+                 <rect x="115" y="115" width="23" height="23" fill="white"></rect>
+                 <rect x="115" y="69" width="23" height="23" fill="#D9D9D9"></rect>
+                 <rect x="92" y="46" width="23" height="23" fill="#AEAEAE"></rect>
+                 <rect x="92" y="115" width="23" height="23" fill="#AEAEAE"></rect>
+                 <rect x="92" y="69" width="23" height="23" fill="white"></rect>
+                 <rect x="69" y="46" width="23" height="23" fill="white"></rect>
+                 <rect x="69" y="115" width="23" height="23" fill="white"></rect>
+                 <rect x="69" y="69" width="23" height="23" fill="#D9D9D9"></rect>
+                 <rect x="46" y="46" width="23" height="23" fill="black"></rect>
+                 <rect x="46" y="115" width="23" height="23" fill="black"></rect>
+                 <rect x="46" y="69" width="23" height="23" fill="black"></rect>
+                 <rect x="23" y="46" width="23" height="23" fill="#D9D9D9"></rect>
+                 <rect x="23" y="115" width="23" height="23" fill="#AEAEAE"></rect>
+                 <rect x="23" y="69" width="23" height="23" fill="black"></rect>
+             </svg>
+             <h1 style="font-weight: 900; margin-bottom: 7px;">
+                 Stable Diffusion Conceptualizer
+             </h1>
+         </div>
+         <p style="margin-bottom: 10px; font-size: 94%">
+             Navigate through community-created concepts and styles via Stable Diffusion Textual Inversion and pick yours for inference.
+             To train your own concepts and contribute to the library, <a style="text-decoration: underline" href="https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb">check out this notebook</a>.
+         </p>
+     </div>
+     ''')
+     with gr.Row():
+         with gr.Column():
+             gr.Markdown("### Navigate the top 100 Textual-Inversion community-trained concepts. Use 600+ from [The Library](https://huggingface.co/sd-concepts-library)")
+             with gr.Row():
+                 image_blocks = []
+                 #for i, model in enumerate(models):
+                 with gr.Box().style(border=None):
+                     gr.HTML(assembleHTML(models))
+                     #title_block(model["token"], model["id"])
+                     #image_blocks.append(image_block(model["images"], model["concept_type"]))
+         with gr.Column():
+             with gr.Box():
+                 with gr.Row(elem_id="prompt_area").style(mobile_collapse=False, equal_height=True):
+                     text = gr.Textbox(
+                         label="Enter your prompt", placeholder="Enter your prompt", show_label=False, max_lines=1, elem_id="prompt_input"
+                     ).style(
+                         border=(True, False, True, True),
+                         rounded=(True, False, False, True),
+                         container=False,
+                     )
+                     btn = gr.Button("Run", elem_id="run_btn").style(
+                         margin=False,
+                         rounded=(False, True, True, False),
+                     )
+             with gr.Row().style():
+                 infer_outputs = gr.Gallery(show_label=False, elem_id="generated-gallery").style(grid=[2], height="512px")
+             with gr.Row():
+                 gr.HTML("<p style=\"font-size: 95%;margin-top: .75em\">Prompting may not work as you are used to. <code>objects</code> may need the concept added at the end, <code>styles</code> may work better at the beginning. You can browse <a href='https://lexica.art'>lexica.art</a> for prompt inspiration.</p>")
+             with gr.Row():
+                 gr.Examples(examples=examples, fn=infer_examples, inputs=[text], outputs=infer_outputs, cache_examples=True)
+             with gr.Group(elem_id="share-btn-container"):
+                 community_icon = gr.HTML(community_icon_html, visible=False)
+                 loading_icon = gr.HTML(loading_icon_html, visible=False)
+                 share_button = gr.Button("Share to community", elem_id="share-btn", visible=False)
+     checkbox_states = {}
+     inputs = [text]
+     btn.click(
+         infer,
+         inputs=inputs,
+         outputs=[infer_outputs, community_icon, loading_icon, share_button]
+     )
+     share_button.click(
+         None,
+         [],
+         [],
+         _js=share_js,
+     )
+ demo.queue(max_size=20).launch()