prithivMLmods committed on
Commit
f191662
·
verified ·
1 Parent(s): 1da3a54

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +62 -24
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
  import random
3
  import uuid
@@ -13,7 +14,7 @@ DESCRIPTIONx = """## REALVISXL V5 🦉
13
  """
14
 
15
  css = '''
16
- .gradio-container{max-width: 550px !important}
17
  h1{text-align:center}
18
  footer {
19
  visibility: hidden
@@ -25,12 +26,28 @@ examples = [
25
  "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
26
  ]
27
 
28
- MODEL_ID = os.getenv("MODEL_VAL_PATH", "SG161222/RealVisXL_V4.0_Lightning")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
30
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
31
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
32
  BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
33
 
 
34
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
35
  pipe = StableDiffusionXLPipeline.from_pretrained(
36
  MODEL_ID,
@@ -44,6 +61,7 @@ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.conf
44
  if USE_TORCH_COMPILE:
45
  pipe.compile()
46
 
 
47
  if ENABLE_CPU_OFFLOAD:
48
  pipe.enable_model_cpu_offload()
49
 
@@ -69,7 +87,7 @@ def set_wallpaper_size(size):
69
  elif size == "Headers (1080x512)":
70
  return 1080, 512
71
  else:
72
- return 1024, 1024 # Default return if none of the conditions are met
73
 
74
  @spaces.GPU(duration=60, enable_queue=True)
75
  def generate(
@@ -77,6 +95,8 @@ def generate(
77
  negative_prompt: str = "",
78
  use_negative_prompt: bool = False,
79
  seed: int = 1,
 
 
80
  wallpaper_size: str = "Default (1024x1024)",
81
  guidance_scale: float = 3,
82
  num_inference_steps: int = 25,
@@ -87,9 +107,9 @@ def generate(
87
  ):
88
  seed = int(randomize_seed_fn(seed, randomize_seed))
89
  generator = torch.Generator(device=device).manual_seed(seed)
90
-
91
  width, height = set_wallpaper_size(wallpaper_size)
92
 
 
93
  options = {
94
  "prompt": [prompt] * num_images,
95
  "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
@@ -100,10 +120,12 @@ def generate(
100
  "generator": generator,
101
  "output_type": "pil",
102
  }
103
-
 
104
  if use_resolution_binning:
105
  options["use_resolution_binning"] = True
106
 
 
107
  images = []
108
  for i in range(0, num_images, BATCH_SIZE):
109
  batch_options = options.copy()
@@ -116,20 +138,26 @@ def generate(
116
  return image_paths, seed
117
 
118
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
119
-
120
  gr.Markdown(DESCRIPTIONx)
121
- with gr.Row():
122
- prompt = gr.Text(
123
- label="Prompt",
124
- show_label=False,
125
- max_lines=1,
126
- placeholder="Enter your prompt",
127
- container=False,
128
- )
129
- run_button = gr.Button("Run ⚡", scale=0)
130
- result = gr.Gallery(label="Result", columns=1, show_label=False)
131
-
132
- with gr.Accordion("Advanced options", open=False, visible=True):
 
 
 
 
 
 
 
133
  num_images = gr.Slider(
134
  label="Number of Images",
135
  minimum=1,
@@ -156,10 +184,19 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
156
  )
157
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
158
  with gr.Row(visible=True):
159
- wallpaper_size = gr.Radio(
160
- choices=["Mobile (1080x1920)", "Desktop (1920x1080)", "Extented (1920x512)", "Headers (1080x512)", "Default (1024x1024)"],
161
- label="Pixel Size(x*y)",
162
- value="Default (1024x1024)"
 
 
 
 
 
 
 
 
 
163
  )
164
  with gr.Row():
165
  guidance_scale = gr.Slider(
@@ -201,7 +238,8 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
201
  negative_prompt,
202
  use_negative_prompt,
203
  seed,
204
- wallpaper_size,
 
205
  guidance_scale,
206
  num_inference_steps,
207
  randomize_seed,
@@ -210,6 +248,6 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
210
  outputs=[result, seed],
211
  api_name="run",
212
  )
213
-
214
  if __name__ == "__main__":
215
  demo.queue(max_size=40).launch()
 
1
+ # ...
2
  import os
3
  import random
4
  import uuid
 
14
  """
15
 
16
  css = '''
17
+ .gradio-container{max-width: 560px !important}
18
  h1{text-align:center}
19
  footer {
20
  visibility: hidden
 
26
  "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K",
27
  ]
28
 
29
+
30
+ #examples = [
31
+ # ["file/1.png", "3d image, cute girl, in the style of Pixar --ar 1:2 --stylize 750, 4K resolution highlights, Sharp focus, octane render, ray tracing, Ultra-High-Definition, 8k, UHD, HDR, (Masterpiece:1.5), (best quality:1.5)"],
32
+ # ["file/2.png", "Cold coffee in a cup bokeh --ar 85:128 --v 6.0 --style raw5, 4K"],
33
+ #["file/3.png", "Vector illustration of a horse, vector graphic design with flat colors on a brown background in the style of vector art, using simple shapes and graphics with simple details, professionally designed as a tshirt logo ready for print on a white background. --ar 89:82 --v 6.0 --style raw"],
34
+ #["file/4.png", "Man in brown leather jacket posing for the camera, in the style of sleek and stylized, clockpunk, subtle shades, exacting precision, ferrania p30 --ar 67:101 --v 5"],
35
+ #["file/5.png", "Commercial photography, giant burger, white lighting, studio light, 8k octane rendering, high resolution photography, insanely detailed, fine details, on a white isolated plain, 8k, commercial photography, stock photo, professional color grading, --v 4 --ar 9:16"]
36
+ #]
37
+
38
+
39
 + #Set an environment variable (read in Python via os.getenv)
40
 + #set VAR_NAME="VALUE"
41
+ #Fetch an environment variable
42
+ #echo %VAR_NAME%
43
+
44
 + MODEL_ID = os.getenv("MODEL_VAL_PATH") #Use an SDXL model repo id as the "MODEL_VAL_PATH" env "VALUE".
45
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
46
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
47
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
48
  BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
49
 
50
+ #Load model outside of function
51
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
52
  pipe = StableDiffusionXLPipeline.from_pretrained(
53
  MODEL_ID,
 
61
  if USE_TORCH_COMPILE:
62
  pipe.compile()
63
 
64
+ # Offloading capacity (RAM)
65
  if ENABLE_CPU_OFFLOAD:
66
  pipe.enable_model_cpu_offload()
67
 
 
87
  elif size == "Headers (1080x512)":
88
  return 1080, 512
89
  else:
90
+ return 1024, 1024
91
 
92
  @spaces.GPU(duration=60, enable_queue=True)
93
  def generate(
 
95
  negative_prompt: str = "",
96
  use_negative_prompt: bool = False,
97
  seed: int = 1,
98
+ width: int = 1024,
99
+ height: int = 1024,
100
  wallpaper_size: str = "Default (1024x1024)",
101
  guidance_scale: float = 3,
102
  num_inference_steps: int = 25,
 
107
  ):
108
  seed = int(randomize_seed_fn(seed, randomize_seed))
109
  generator = torch.Generator(device=device).manual_seed(seed)
 
110
  width, height = set_wallpaper_size(wallpaper_size)
111
 
112
+
113
  options = {
114
  "prompt": [prompt] * num_images,
115
  "negative_prompt": [negative_prompt] * num_images if use_negative_prompt else None,
 
120
  "generator": generator,
121
  "output_type": "pil",
122
  }
123
+
124
 + # Lower VRAM usage via resolution binning
125
  if use_resolution_binning:
126
  options["use_resolution_binning"] = True
127
 
128
 + # Generate images, potentially in multiple batches
129
  images = []
130
  for i in range(0, num_images, BATCH_SIZE):
131
  batch_options = options.copy()
 
138
  return image_paths, seed
139
 
140
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
 
141
  gr.Markdown(DESCRIPTIONx)
142
+ with gr.Group():
143
+ with gr.Row():
144
+ prompt = gr.Text(
145
+ label="Prompt",
146
+ show_label=False,
147
+ max_lines=1,
148
+ placeholder="Enter your prompt",
149
+ container=False,
150
+ )
151
+ run_button = gr.Button("Run", scale=0)
152
+ result = gr.Gallery(label="Result", columns=1, show_label=False)
153
+
154
+ with gr.Row(visible=True):
155
+ wallpaper_size = gr.Radio(
156
+ choices=["Mobile (1080x1920)", "Desktop (1920x1080)", "Extented (1920x512)", "Headers (1080x512)", "Default (1024x1024)"],
157
+ label="Pixel Size(x*y)",
158
+ value="Default (1024x1024)"
159
+ )
160
+ with gr.Accordion("Advanced options", open=False, visible=False):
161
  num_images = gr.Slider(
162
  label="Number of Images",
163
  minimum=1,
 
184
  )
185
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
186
  with gr.Row(visible=True):
187
+ width = gr.Slider(
188
+ label="Width",
189
+ minimum=512,
190
+ maximum=MAX_IMAGE_SIZE,
191
+ step=64,
192
+ value=1024,
193
+ )
194
+ height = gr.Slider(
195
+ label="Height",
196
+ minimum=512,
197
+ maximum=MAX_IMAGE_SIZE,
198
+ step=64,
199
+ value=1024,
200
  )
201
  with gr.Row():
202
  guidance_scale = gr.Slider(
 
238
  negative_prompt,
239
  use_negative_prompt,
240
  seed,
241
+ width,
242
+ height,
243
  guidance_scale,
244
  num_inference_steps,
245
  randomize_seed,
 
248
  outputs=[result, seed],
249
  api_name="run",
250
  )
251
+
252
  if __name__ == "__main__":
253
  demo.queue(max_size=40).launch()