tombetthauser committed on
Commit 323d463 · 1 Parent(s): 69aef84

Update app.py


Comment out unnecessary sections and dependencies from the Colab notebook version

Files changed (1)
  1. app.py +47 -43
app.py CHANGED
@@ -17,10 +17,9 @@ import gradio
 import torch
 import os
 
-# UNDER CONSTRUCTION (START)
+# UNDER CONSTRUCTION ---{{{
 import subprocess
-# UNDER CONSTRUCTION (END)
-
+# }}}---
 
 # FOR DEPLOYMENT: uncomment these and delete the notebook_login() below
 api_key = os.environ['api_key']
@@ -29,23 +28,25 @@ my_token = api_key
 # from huggingface_hub import notebook_login
 # notebook_login()
 
-import PIL
-from PIL import Image
+# NOT NEEDED FOR DEPLOYMENT ---{{{
+# import PIL
+# from PIL import Image
 
-def image_grid(imgs, rows, cols):
-    assert len(imgs) == rows*cols
+# def image_grid(imgs, rows, cols):
+#     assert len(imgs) == rows*cols
 
-    w, h = imgs[0].size
-    grid = Image.new('RGB', size=(cols*w, rows*h))
-    grid_w, grid_h = grid.size
+#     w, h = imgs[0].size
+#     grid = Image.new('RGB', size=(cols*w, rows*h))
+#     grid_w, grid_h = grid.size
 
-    for i, img in enumerate(imgs):
-        grid.paste(img, box=(i%cols*w, i//cols*h))
-    return grid
+#     for i, img in enumerate(imgs):
+#         grid.paste(img, box=(i%cols*w, i//cols*h))
+#     return grid
+# }}}---
 
 pretrained_model_name_or_path = "stabilityai/stable-diffusion-2"
 
-from IPython.display import Markdown
+# from IPython.display import Markdown
 from huggingface_hub import hf_hub_download
 
 
@@ -86,10 +87,10 @@ for repo_id_embeds in models_to_load:
     #!cp downloaded_embedding_folder
     #!cp downloaded_embedding_folder
 
-    # UNDER CONSTRUCTION (START)
+    # UNDER CONSTRUCTION ---{{{
     subprocess.call([f"cp {embeds_path} {downloaded_embedding_folder}"])
     subprocess.call([f"cp {token_path} {downloaded_embedding_folder}"])
-    # UNDER CONSTRUCTION (END)
+    # }}}---
 
     with open(f'{downloaded_embedding_folder}/token_identifier.txt', 'r') as file:
         placeholder_token_string = file.read()
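Note: each subprocess.call above passes the whole command as a single f-string wrapped in a list, so Python looks for an executable whose name is that entire string and the copy would normally fail with FileNotFoundError rather than run cp. A minimal sketch of the two usual forms follows; the path values are placeholders standing in for the embeds_path, token_path and downloaded_embedding_folder variables defined earlier in app.py.

import subprocess

# Sketch only: placeholder values standing in for the variables used in app.py.
embeds_path = "learned_embeds.bin"
token_path = "token_identifier.txt"
downloaded_embedding_folder = "./downloaded_embedding"

# Form 1: pass the program and each argument as separate list items.
subprocess.call(["cp", embeds_path, downloaded_embedding_folder])
subprocess.call(["cp", token_path, downloaded_embedding_folder])

# Form 2: keep a single command string and let the shell parse it.
subprocess.call(f"cp {embeds_path} {downloaded_embedding_folder}", shell=True)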
@@ -97,9 +98,9 @@ for repo_id_embeds in models_to_load:
     # FOR DEPLOYMENT: address file system use
     #!wget -q -O $downloaded_embedding_folder/learned_embeds.bin $embeds_url
 
-    # UNDER CONSTRUCTION (START)
+    # UNDER CONSTRUCTION ---{{{
     subprocess.call([f"wget -q -O {downloaded_embedding_folder}/learned_embeds.bin {embeds_url}"])
-    # UNDER CONSTRUCTION (END)
+    # }}}---
 
     learned_embeds_path = f"{downloaded_embedding_folder}/learned_embeds.bin"
 
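The wget call above has the same single-string issue, and since huggingface_hub's hf_hub_download is already imported earlier in the file, the download could in principle be done with a library call instead of shelling out. The sketch below is not part of this commit; it assumes the embedding file is named learned_embeds.bin at the root of the concept repo referenced by repo_id_embeds, as the wget URL suggests.

from huggingface_hub import hf_hub_download

# Sketch only: repo_id_embeds stands in for the concept repo id that app.py
# iterates over, e.g. "sd-concepts-library/ahx-model-5".
repo_id_embeds = "sd-concepts-library/ahx-model-5"

# Downloads into the local Hugging Face cache and returns the cached file path.
learned_embeds_path = hf_hub_download(repo_id=repo_id_embeds, filename="learned_embeds.bin")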
@@ -160,41 +161,44 @@ for repo_id_embeds in models_to_load:
 
 #@title 4. Print Available Concept Strings
 
-print("AVAILABLE CONCEPTS TO SELECT FROM")
-print("copy one and paste below under 'model'")
-print("------------------------------------------------------")
-# list(completed_concept_pipes)
-for model in completed_concept_pipes:
-    print(f"{model}")
-
+# NOT NEEDED FOR DEPLOYMENT ---{{{
+# print("AVAILABLE CONCEPTS TO SELECT FROM")
+# print("copy one and paste below under 'model'")
+# print("------------------------------------------------------")
+# # list(completed_concept_pipes)
+# for model in completed_concept_pipes:
+#     print(f"{model}")
+# }}}---
 
 #@title 5. Optionally Test without Gradio
 
-model = "" #@param {type: "string"}
-prompt = "" #@param {type:"string"}
+# NOT NEEDED FOR DEPLOYMENT ---{{{
+# model = "" #@param {type: "string"}
+# prompt = "" #@param {type:"string"}
 
-if prompt and model:
-    if model not in completed_concept_pipes:
-        raise ValueError("Invalid Model Name")
+# if prompt and model:
+#     if model not in completed_concept_pipes:
+#         raise ValueError("Invalid Model Name")
 
-    model_token = model.split("/")[1]
-    prompt = f"{prompt} in the style of <{model_token}>"
+#     model_token = model.split("/")[1]
+#     prompt = f"{prompt} in the style of <{model_token}>"
 
-    if model == "sd-concepts-library/ahx-model-5":
-        prompt = f"{prompt} in the style of "
+#     if model == "sd-concepts-library/ahx-model-5":
+#         prompt = f"{prompt} in the style of "
 
-    num_samples = 1
-    num_rows = 1
+#     num_samples = 1
+#     num_rows = 1
 
-    all_images = []
-    pipe = completed_concept_pipes[model]
+#     all_images = []
+#     pipe = completed_concept_pipes[model]
 
-    for _ in range(num_rows):
-        images = pipe(prompt, num_images_per_prompt=num_samples, height=512, width=512, num_inference_steps=30, guidance_scale=7.5).images
-        all_images.extend(images)
+#     for _ in range(num_rows):
+#         images = pipe(prompt, num_images_per_prompt=num_samples, height=512, width=512, num_inference_steps=30, guidance_scale=7.5).images
+#         all_images.extend(images)
 
-    grid = image_grid(all_images, num_samples, num_rows)
-    grid
+#     grid = image_grid(all_images, num_samples, num_rows)
+#     grid
+# }}}---
 
 
 #@title 6. Define Custom CSS for Gradio
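The cell commented out above was the notebook-only way of exercising a pipeline directly. In the deployed Space the same call would instead be driven from the Gradio interface that app.py presumably defines further down, outside this diff. A rough, hypothetical sketch of that wiring, reusing the completed_concept_pipes mapping from this file, might look like:

import gradio

# Sketch only: completed_concept_pipes is assumed to be the dict built earlier in
# app.py, mapping concept repo ids to loaded Stable Diffusion pipelines.
def generate(model, prompt):
    pipe = completed_concept_pipes[model]
    model_token = model.split("/")[1]
    styled_prompt = f"{prompt} in the style of <{model_token}>"
    images = pipe(styled_prompt, height=512, width=512,
                  num_inference_steps=30, guidance_scale=7.5).images
    return images[0]

demo = gradio.Interface(
    fn=generate,
    inputs=[
        gradio.Dropdown(choices=list(completed_concept_pipes), label="model"),
        gradio.Textbox(label="prompt"),
    ],
    outputs=gradio.Image(),
)
demo.launch()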
 