prithivMLmods committed on
Commit
ddfc2a2
·
verified ·
1 Parent(s): 2c3f177

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +108 -81
app.py CHANGED
@@ -33,63 +33,81 @@ trigger_word = "Super Realism" # Leave blank if not used
33
  pipe.load_lora_weights(lora_repo)
34
  pipe.to("cuda")
35
 
36
- # Define style options
37
  style_list = [
38
  {
39
  "name": "3840 x 2160",
40
  "prompt": "hyper-realistic 8K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
 
41
  },
42
  {
43
  "name": "2560 x 1440",
44
  "prompt": "hyper-realistic 4K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
 
45
  },
46
  {
47
  "name": "HD+",
48
  "prompt": "hyper-realistic 2K image of {prompt}. ultra-detailed, lifelike, high-resolution, sharp, vibrant colors, photorealistic",
 
49
  },
50
  {
51
  "name": "Style Zero",
52
  "prompt": "{prompt}",
 
53
  },
54
  ]
55
 
56
- styles = {k["name"]: k["prompt"] for k in style_list}
57
  DEFAULT_STYLE_NAME = "3840 x 2160"
58
  STYLE_NAMES = list(styles.keys())
59
 
60
  # Apply selected style to the prompt
61
- def apply_style(style_name: str, positive: str) -> str:
62
- return styles.get(style_name, styles[DEFAULT_STYLE_NAME]).replace("{prompt}", positive)
 
63
 
64
  # Image generation function with Spaces GPU support
65
  @spaces.GPU(duration=60, enable_queue=True)
66
  def generate(
67
  prompt: str,
 
 
68
  seed: int = 0,
69
  width: int = 1024,
70
  height: int = 1024,
71
  guidance_scale: float = 3,
72
  randomize_seed: bool = False,
73
  style_name: str = DEFAULT_STYLE_NAME,
 
74
  progress=gr.Progress(track_tqdm=True),
75
  ):
76
- seed = int(randomize_seed_fn(seed, randomize_seed))
77
- positive_prompt = apply_style(style_name, prompt)
 
 
 
 
 
 
78
 
79
  if trigger_word:
80
  positive_prompt = f"{trigger_word} {positive_prompt}"
81
-
 
 
 
82
  images = pipe(
83
  prompt=positive_prompt,
 
84
  width=width,
85
  height=height,
86
  guidance_scale=guidance_scale,
87
- num_inference_steps=30,
88
  num_images_per_prompt=1,
 
89
  output_type="pil",
90
  ).images
91
  image_paths = [save_image(img) for img in images]
92
- print(image_paths)
93
  return image_paths, seed
94
 
95
  # Example prompts
@@ -122,81 +140,87 @@ footer {
122
  '''
123
 
124
  # Gradio interface
125
- with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
126
  with gr.Row():
127
- with gr.Column(scale=1):
128
- prompt = gr.Text(
129
- label="Prompt",
130
- show_label=False,
131
- max_lines=1,
132
- placeholder="Enter your prompt",
133
- container=False,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134
  )
135
- run_button = gr.Button("Generated as 3:1 Images", scale=0, elem_classes="submit-btn")
136
-
137
- with gr.Accordion("Advanced options", open=True, visible=True):
138
- seed = gr.Slider(
139
- label="Seed",
140
- minimum=0,
141
- maximum=MAX_SEED,
142
- step=1,
143
- value=0,
144
- visible=True
145
- )
146
- randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
147
-
148
- with gr.Row(visible=True):
149
- width = gr.Slider(
150
- label="Width",
151
- minimum=512,
152
- maximum=2048,
153
- step=64,
154
- value=1280,
155
- )
156
- height = gr.Slider(
157
- label="Height",
158
- minimum=512,
159
- maximum=2048,
160
- step=64,
161
- value=832,
162
- )
163
-
164
- with gr.Row():
165
- guidance_scale = gr.Slider(
166
- label="Guidance Scale",
167
- minimum=0.1,
168
- maximum=20.0,
169
- step=0.1,
170
- value=3.0,
171
- )
172
- num_inference_steps = gr.Slider(
173
- label="Number of inference steps",
174
- minimum=1,
175
- maximum=40,
176
- step=1,
177
- value=30,
178
- )
179
-
180
- style_selection = gr.Radio(
181
- show_label=True,
182
- container=True,
183
- interactive=True,
184
- choices=STYLE_NAMES,
185
- value=DEFAULT_STYLE_NAME,
186
- label="Quality Style",
187
- )
188
-
189
- with gr.Column(scale=2):
190
- result = gr.Gallery(label="Result", columns=1, show_label=False)
191
-
192
- gr.Examples(
193
- examples=examples,
194
- inputs=prompt,
195
- outputs=[result, seed],
196
- fn=generate,
197
- cache_examples=False,
198
  )
199
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
200
  gr.on(
201
  triggers=[
202
  prompt.submit,
@@ -205,12 +229,15 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
205
  fn=generate,
206
  inputs=[
207
  prompt,
 
 
208
  seed,
209
  width,
210
  height,
211
  guidance_scale,
212
  randomize_seed,
213
  style_selection,
 
214
  ],
215
  outputs=[result, seed],
216
  api_name="run",
 
33
  pipe.load_lora_weights(lora_repo)
34
  pipe.to("cuda")
35
 
36
# Define style options with negative prompts.
# Every quality style shares the same realism template, differing only in
# the advertised resolution tag; "Style Zero" passes the prompt through as-is.
_QUALITY_PROMPT = (
    "hyper-realistic {res} image of {{prompt}}. ultra-detailed, lifelike,"
    " high-resolution, sharp, vibrant colors, photorealistic"
)

style_list = [
    {"name": name, "prompt": _QUALITY_PROMPT.format(res=res), "negative_prompt": ""}
    for name, res in (("3840 x 2160", "8K"), ("2560 x 1440", "4K"), ("HD+", "2K"))
]
style_list.append({"name": "Style Zero", "prompt": "{prompt}", "negative_prompt": ""})

# name -> (prompt template, negative prompt) lookup used by apply_style.
styles = {entry["name"]: (entry["prompt"], entry["negative_prompt"]) for entry in style_list}
DEFAULT_STYLE_NAME = "3840 x 2160"
STYLE_NAMES = list(styles.keys())

# Apply selected style to the prompt
def apply_style(style_name: str, positive: str) -> Tuple[str, str]:
    """Fill the selected style's template with *positive*.

    Unknown style names fall back to DEFAULT_STYLE_NAME.
    Returns a (styled prompt, style negative prompt) pair.
    """
    template, negative = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
    return template.replace("{prompt}", positive), negative
68
 
69
# Image generation function with Spaces GPU support
@spaces.GPU(duration=60, enable_queue=True)
def generate(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,
    seed: int = 0,
    width: int = 1024,
    height: int = 1024,
    guidance_scale: float = 3,
    randomize_seed: bool = False,
    style_name: str = DEFAULT_STYLE_NAME,
    num_inference_steps: int = 30,
    progress=gr.Progress(track_tqdm=True),
):
    """Run the diffusion pipeline once and return (image paths, seed used).

    The selected quality style is applied to the prompt, the user's negative
    prompt is appended to the style's negative prompt only when the toggle is
    on, and the module-level trigger word (if set) is prepended before
    sampling with a CUDA generator seeded for reproducibility.
    """
    styled_prompt, style_negative = apply_style(style_name, prompt)

    # Merge the style negative with the user's only when the toggle is on.
    combined = style_negative + " " + negative_prompt if use_negative_prompt else style_negative
    merged_negative = combined.strip()

    if trigger_word:
        styled_prompt = f"{trigger_word} {styled_prompt}"

    seed = int(randomize_seed_fn(seed, randomize_seed))
    rng = torch.Generator(device="cuda").manual_seed(seed)

    # Pass None instead of an empty string so the pipeline skips the
    # negative prompt entirely when there is nothing to negate.
    result = pipe(
        prompt=styled_prompt,
        negative_prompt=merged_negative or None,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        num_images_per_prompt=1,
        generator=rng,
        output_type="pil",
    )
    return [save_image(img) for img in result.images], seed
112
 
113
  # Example prompts
 
140
  '''
141
 
142
  # Gradio interface
143
+ with gr.Blocks(css=css) as demo:
144
  with gr.Row():
145
+ prompt = gr.Text(
146
+ label="Prompt",
147
+ show_label=False,
148
+ max_lines=1,
149
+ placeholder="Enter your prompt",
150
+ container=False,
151
+ )
152
+ run_button = gr.Button("Run", scale=0, elem_classes="submit-btn")
153
+ result = gr.Gallery(label="Result", columns=1, show_label=False)
154
+
155
+ with gr.Accordion("Advanced options", open=False):
156
+ style_selection = gr.Dropdown(
157
+ label="Quality Style",
158
+ choices=STYLE_NAMES,
159
+ value=DEFAULT_STYLE_NAME,
160
+ interactive=True,
161
+ )
162
+ use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
163
+ negative_prompt = gr.Text(
164
+ label="Negative prompt",
165
+ max_lines=1,
166
+ placeholder="Enter a negative prompt",
167
+ visible=False,
168
+ )
169
+ seed = gr.Slider(
170
+ label="Seed",
171
+ minimum=0,
172
+ maximum=MAX_SEED,
173
+ step=1,
174
+ value=0,
175
+ )
176
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
177
+ with gr.Row():
178
+ width = gr.Slider(
179
+ label="Width",
180
+ minimum=512,
181
+ maximum=2048,
182
+ step=64,
183
+ value=1280,
184
  )
185
+ height = gr.Slider(
186
+ label="Height",
187
+ minimum=512,
188
+ maximum=2048,
189
+ step=64,
190
+ value=832,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
  )
192
+ guidance_scale = gr.Slider(
193
+ label="Guidance Scale",
194
+ minimum=0.1,
195
+ maximum=20.0,
196
+ step=0.1,
197
+ value=3.0,
198
+ )
199
+ num_inference_steps = gr.Slider(
200
+ label="Number of inference steps",
201
+ minimum=1,
202
+ maximum=40,
203
+ step=1,
204
+ value=30,
205
+ )
206
+
207
+ gr.Examples(
208
+ examples=examples,
209
+ inputs=prompt,
210
+ outputs=[result, seed],
211
+ fn=generate,
212
+ cache_examples=False,
213
+ )
214
+
215
+ # Handle visibility of negative prompt
216
+ use_negative_prompt.change(
217
+ fn=lambda x: gr.update(visible=x),
218
+ inputs=use_negative_prompt,
219
+ outputs=negative_prompt,
220
+ api_name=False,
221
+ )
222
+
223
+ # Trigger generate on prompt submit or run button click
224
  gr.on(
225
  triggers=[
226
  prompt.submit,
 
229
  fn=generate,
230
  inputs=[
231
  prompt,
232
+ negative_prompt,
233
+ use_negative_prompt,
234
  seed,
235
  width,
236
  height,
237
  guidance_scale,
238
  randomize_seed,
239
  style_selection,
240
+ num_inference_steps,
241
  ],
242
  outputs=[result, seed],
243
  api_name="run",