prithivMLmods committed
Commit 6c00396 · verified · Parent: d78dd57

Update app.py

Files changed (1)
  app.py  +48 -58
app.py CHANGED
@@ -15,10 +15,6 @@ torch_dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
 pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
 pipe = pipe.to(device)
 
-#pipe.load_lora_weights("prithivMLmods/SD3.5-Turbo-Realism-2.0-LoRA", weight_name="SD3.5-Turbo-Realism-2.0-LoRA.safetensors")
-#trigger_word = "Turbo Realism"
-#pipe.fuse_lora(lora_scale=1.0)
-
 # Constants
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
@@ -58,6 +54,7 @@ grid_sizes = {
     "3x2": (3, 2),
     "1x1": (1, 1),
 }
+
 @spaces.GPU
 def infer(
     prompt,
@@ -72,21 +69,17 @@ def infer(
     grid_size="1x1",
     progress=gr.Progress(track_tqdm=True),
 ):
-    # Apply seed
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
 
-    # Style formatting
     selected_style = next(s for s in style_list if s["name"] == style)
     styled_prompt = selected_style["prompt"].format(prompt=prompt)
     styled_negative = selected_style["negative_prompt"] or negative_prompt
 
-    # Grid calculation
     grid_x, grid_y = grid_sizes.get(grid_size, (1, 1))
     num_images = grid_x * grid_y
 
-    # Inference
     output = pipe(
         prompt=styled_prompt,
         negative_prompt=styled_negative,
@@ -98,7 +91,6 @@
         num_images_per_prompt=num_images,
     )
 
-    # Combine into grid
     grid_img = Image.new('RGB', (width * grid_x, height * grid_y))
     for i, img in enumerate(output.images[:num_images]):
         x = (i % grid_x) * width
@@ -116,71 +108,69 @@ examples = [
 
 css = '''
 .gradio-container {
-  max-width: 599px !important;
-  margin: 0 auto !important;
-  display: flex;
-  flex-direction: column;
-  align-items: center;
-  justify-content: center;
+  max-width: 100%;
+  margin: 0 auto;
 }
 h1 { text-align: center; }
 footer { visibility: hidden; }
 '''
 
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown("## Text to Image SD3.5")
-
-        with gr.Row():
-            prompt = gr.Text(
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-            run_button = gr.Button("Run", scale=0, variant="primary")
+    gr.Markdown("## Text to Image SD3.5")
 
-        result = gr.Image(show_label=False)
+    with gr.Row():
+        with gr.Column(scale=1):
+            with gr.Row():
+                prompt = gr.Text(
+                    show_label=False,
+                    max_lines=1,
+                    placeholder="Enter your prompt",
+                    container=False,
+                )
+                run_button = gr.Button("Run", scale=0, variant="primary")
+
+            result = gr.Image(show_label=False)
 
-        with gr.Row():
             grid_size_selection = gr.Dropdown(
                 choices=list(grid_sizes.keys()),
                 value="1x1",
                 label="Grid Size"
             )
 
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
-            )
-            seed = gr.Slider(0, MAX_SEED, value=0, label="Seed")
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                width = gr.Slider(512, MAX_IMAGE_SIZE, step=32, value=1024, label="Width")
-                height = gr.Slider(512, MAX_IMAGE_SIZE, step=32, value=1024, label="Height")
-
-            with gr.Row():
-                guidance_scale = gr.Slider(0.0, 7.5, step=0.1, value=0.0, label="Guidance scale")
-                num_inference_steps = gr.Slider(1, 50, step=1, value=10, label="Number of inference steps")
-
-            style_selection = gr.Radio(
-                choices=STYLE_NAMES,
-                value=DEFAULT_STYLE_NAME,
-                label="Quality Style",
+            with gr.Accordion("Advanced Settings", open=False):
+                negative_prompt = gr.Text(
+                    label="Negative prompt",
+                    max_lines=1,
+                    placeholder="Enter a negative prompt",
+                    value="(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation",
+                )
+                seed = gr.Slider(0, MAX_SEED, value=0, label="Seed")
+                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+
+                with gr.Row():
+                    width = gr.Slider(512, MAX_IMAGE_SIZE, step=32, value=1024, label="Width")
+                    height = gr.Slider(512, MAX_IMAGE_SIZE, step=32, value=1024, label="Height")
+
+                with gr.Row():
+                    guidance_scale = gr.Slider(0.0, 7.5, step=0.1, value=0.0, label="Guidance scale")
+                    num_inference_steps = gr.Slider(1, 50, step=1, value=10, label="Number of inference steps")
+
+                style_selection = gr.Radio(
+                    choices=STYLE_NAMES,
+                    value=DEFAULT_STYLE_NAME,
+                    label="Quality Style",
+                )
+
+        with gr.Column(scale=1):
+            gr.Examples(
+                examples=examples,
+                inputs=[prompt],
+                outputs=[result, seed],
+                fn=infer,
+                cache_examples=False,
+                label="Prompt Examples"
             )
 
-        gr.Examples(
-            examples=examples,
-            inputs=[prompt],
-            outputs=[result, seed],
-            fn=infer,
-            cache_examples=False
-        )
-
     gr.on(
         triggers=[run_button.click, prompt.submit],
         fn=infer,
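
Note: the grid-assembly loop in infer is only partially visible in the hunk above (just the x offset is shown). Below is a minimal, self-contained sketch of the same row-major tiling idea with PIL; the helper name tile_grid and the solid-colour stand-in tiles are illustrative and are not part of app.py.

```python
# Minimal sketch of row-major grid tiling with PIL.
# tile_grid is a hypothetical helper, not a function defined in app.py.
from PIL import Image

def tile_grid(images, grid_x, grid_y, width, height):
    """Paste up to grid_x * grid_y images onto one canvas, row by row."""
    canvas = Image.new("RGB", (width * grid_x, height * grid_y))
    for i, img in enumerate(images[: grid_x * grid_y]):
        x = (i % grid_x) * width    # column offset, as in the hunk above
        y = (i // grid_x) * height  # row offset
        canvas.paste(img, (x, y))   # assumes each tile is already width x height
    return canvas

if __name__ == "__main__":
    # Stand-in tiles; in the Space itself, output.images from pipe(...) plays this role.
    tiles = [Image.new("RGB", (256, 256), c) for c in ("red", "green", "blue", "gray")]
    tile_grid(tiles, grid_x=2, grid_y=2, width=256, height=256).save("grid_preview.png")
```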