prithivMLmods committed on
Commit
1da3a54
·
verified ·
1 Parent(s): 9a0d412

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -14
app.py CHANGED
@@ -29,7 +29,7 @@ MODEL_ID = os.getenv("MODEL_VAL_PATH", "SG161222/RealVisXL_V4.0_Lightning")
29
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
30
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
31
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
32
- BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1"))
33
 
34
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
35
  pipe = StableDiffusionXLPipeline.from_pretrained(
@@ -64,7 +64,7 @@ def set_wallpaper_size(size):
64
  return 1080, 1920
65
  elif size == "Desktop (1920x1080)":
66
  return 1920, 1080
67
- elif size == "Extended (1920x512)":
68
  return 1920, 512
69
  elif size == "Headers (1080x512)":
70
  return 1080, 512
@@ -77,12 +77,12 @@ def generate(
77
  negative_prompt: str = "",
78
  use_negative_prompt: bool = False,
79
  seed: int = 1,
 
80
  guidance_scale: float = 3,
81
  num_inference_steps: int = 25,
82
  randomize_seed: bool = False,
83
  use_resolution_binning: bool = True,
84
  num_images: int = 1, # Number of images to generate
85
- wallpaper_size: str = "Default (1024x1024)",
86
  progress=gr.Progress(track_tqdm=True),
87
  ):
88
  seed = int(randomize_seed_fn(seed, randomize_seed))
@@ -105,7 +105,7 @@ def generate(
105
  options["use_resolution_binning"] = True
106
 
107
  images = []
108
- for i in range 0, num_images, BATCH_SIZE):
109
  batch_options = options.copy()
110
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
111
  if "negative_prompt" in batch_options:
@@ -116,6 +116,7 @@ def generate(
116
  return image_paths, seed
117
 
118
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
 
119
  gr.Markdown(DESCRIPTIONx)
120
  with gr.Row():
121
  prompt = gr.Text(
@@ -127,13 +128,6 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
127
  )
128
  run_button = gr.Button("Run ⚡", scale=0)
129
  result = gr.Gallery(label="Result", columns=1, show_label=False)
130
-
131
- with gr.Row(visible=True):
132
- wallpaper_size = gr.Radio(
133
- choices=["Mobile (1080x1920)", "Desktop (1920x1080)", "Extended (1920x512)", "Headers (1080x512)", "Default (1024x1024)"],
134
- label="Pixel Size(x*y)",
135
- value="Default (1024x1024)"
136
- )
137
 
138
  with gr.Accordion("Advanced options", open=False, visible=True):
139
  num_images = gr.Slider(
@@ -161,7 +155,12 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
161
  value=0,
162
  )
163
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
164
-
 
 
 
 
 
165
  with gr.Row():
166
  guidance_scale = gr.Slider(
167
  label="Guidance Scale",
@@ -202,11 +201,11 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
202
  negative_prompt,
203
  use_negative_prompt,
204
  seed,
 
205
  guidance_scale,
206
  num_inference_steps,
207
  randomize_seed,
208
- num_images,
209
- wallpaper_size,
210
  ],
211
  outputs=[result, seed],
212
  api_name="run",
 
29
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "4096"))
30
  USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE", "0") == "1"
31
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
32
+ BATCH_SIZE = int(os.getenv("BATCH_SIZE", "1")) # Allow generating multiple images at once
33
 
34
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
35
  pipe = StableDiffusionXLPipeline.from_pretrained(
 
64
  return 1080, 1920
65
  elif size == "Desktop (1920x1080)":
66
  return 1920, 1080
67
+ elif size == "Extented (1920x512)":
68
  return 1920, 512
69
  elif size == "Headers (1080x512)":
70
  return 1080, 512
 
77
  negative_prompt: str = "",
78
  use_negative_prompt: bool = False,
79
  seed: int = 1,
80
+ wallpaper_size: str = "Default (1024x1024)",
81
  guidance_scale: float = 3,
82
  num_inference_steps: int = 25,
83
  randomize_seed: bool = False,
84
  use_resolution_binning: bool = True,
85
  num_images: int = 1, # Number of images to generate
 
86
  progress=gr.Progress(track_tqdm=True),
87
  ):
88
  seed = int(randomize_seed_fn(seed, randomize_seed))
 
105
  options["use_resolution_binning"] = True
106
 
107
  images = []
108
+ for i in range(0, num_images, BATCH_SIZE):
109
  batch_options = options.copy()
110
  batch_options["prompt"] = options["prompt"][i:i+BATCH_SIZE]
111
  if "negative_prompt" in batch_options:
 
116
  return image_paths, seed
117
 
118
  with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
119
+
120
  gr.Markdown(DESCRIPTIONx)
121
  with gr.Row():
122
  prompt = gr.Text(
 
128
  )
129
  run_button = gr.Button("Run ⚡", scale=0)
130
  result = gr.Gallery(label="Result", columns=1, show_label=False)
 
 
 
 
 
 
 
131
 
132
  with gr.Accordion("Advanced options", open=False, visible=True):
133
  num_images = gr.Slider(
 
155
  value=0,
156
  )
157
  randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
158
+ with gr.Row(visible=True):
159
+ wallpaper_size = gr.Radio(
160
+ choices=["Mobile (1080x1920)", "Desktop (1920x1080)", "Extented (1920x512)", "Headers (1080x512)", "Default (1024x1024)"],
161
+ label="Pixel Size(x*y)",
162
+ value="Default (1024x1024)"
163
+ )
164
  with gr.Row():
165
  guidance_scale = gr.Slider(
166
  label="Guidance Scale",
 
201
  negative_prompt,
202
  use_negative_prompt,
203
  seed,
204
+ wallpaper_size,
205
  guidance_scale,
206
  num_inference_steps,
207
  randomize_seed,
208
+ num_images
 
209
  ],
210
  outputs=[result, seed],
211
  api_name="run",