ginipick committed
Commit a821008 · verified · 1 Parent(s): 70eca38

Update app.py

Files changed (1): app.py (+26, -50)
app.py CHANGED
@@ -38,7 +38,6 @@ english_labels = {
 
 # Load pipelines
 base_model = "black-forest-labs/FLUX.1-schnell"
-
 taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to("cuda")
 pipe = FluxPipeline.from_pretrained(
     base_model,
@@ -47,7 +46,6 @@ pipe = FluxPipeline.from_pretrained(
 )
 pipe.transformer.to(memory_format=torch.channels_last)
 clip_slider = CLIPSliderFlux(pipe, device=torch.device("cuda"))
-
 MAX_SEED = 2**32 - 1
 
 def save_images_with_unique_filenames(image_list, save_directory):
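For context, a minimal sketch of how the loading calls in these two hunks fit together, with the imports they rely on. The keyword arguments elided between the hunks are not visible here, so passing taef1 as the pipeline VAE and the bfloat16 dtype on the pipeline are assumptions.

```python
# Minimal sketch of the pipeline setup, assuming standard diffusers imports.
import torch
from diffusers import AutoencoderTiny, FluxPipeline

base_model = "black-forest-labs/FLUX.1-schnell"
taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=torch.bfloat16).to("cuda")
pipe = FluxPipeline.from_pretrained(
    base_model,
    vae=taef1,                   # assumption: the tiny VAE replaces the default one for faster decoding
    torch_dtype=torch.bfloat16,  # assumption: dtype matching the VAE above
).to("cuda")
pipe.transformer.to(memory_format=torch.channels_last)
```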
@@ -95,20 +93,16 @@ def generate(prompt,
     prompt = translate_if_korean(prompt)
     concept_1 = translate_if_korean(concept_1)
     concept_2 = translate_if_korean(concept_2)
-
     print(f"Prompt: {prompt}, ← {concept_2}, {concept_1} ➡️ . scale {scale}, interm steps {interm_steps}")
     slider_x = [concept_2, concept_1]
-    # Re-calculate latent direction if needed
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
-
     if not sorted(slider_x) == sorted([x_concept_1, x_concept_2]) or recalc_directions:
         gradio_progress(0, desc="Calculating directions...")
         avg_diff = clip_slider.find_latent_direction(slider_x[0], slider_x[1], num_iterations=iterations)
         x_concept_1, x_concept_2 = slider_x[0], slider_x[1]
     else:
         avg_diff = avg_diff_x
-
     images = []
     high_scale = scale
     low_scale = -1 * scale
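The `sorted()` comparison above is an order-insensitive cache check: the latent direction is only recomputed when the concept pair actually changes or `recalc_directions` is set. A small illustration:

```python
# Swapping the two concepts still matches the cached pair, so avg_diff_x is reused.
slider_x = ["Rotten", "Fresh"]
x_concept_1, x_concept_2 = "Fresh", "Rotten"
print(sorted(slider_x) == sorted([x_concept_1, x_concept_2]))  # True -> skip find_latent_direction
```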
@@ -128,7 +122,6 @@ def generate(prompt,
     canvas = Image.new('RGB', (256 * interm_steps, 256))
     for i, im in enumerate(images):
         canvas.paste(im.resize((256, 256)), (256 * i, 0))
-
     comma_concepts_x = f"{slider_x[1]}, {slider_x[0]}"
     scale_total = convert_to_centered_scale(interm_steps)
     scale_min = scale_total[0]
@@ -136,7 +129,6 @@ def generate(prompt,
     scale_middle = scale_total.index(0)
     post_generation_slider_update = gr.update(label=comma_concepts_x, value=0, minimum=scale_min, maximum=scale_max, interactive=True)
     avg_diff_x = avg_diff.cpu()
-
     video_path = f"{uuid.uuid4()}.mp4"
     print(video_path)
     return x_concept_1, x_concept_2, avg_diff_x, export_to_video(images, video_path, fps=5), canvas, images, images[scale_middle], post_generation_slider_update, seed
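`convert_to_centered_scale` is not part of this diff; below is a hypothetical sketch consistent with how it is used above, where `scale_total[0]` becomes the slider minimum and `scale_total.index(0)` picks the middle frame.

```python
def convert_to_centered_scale(num_steps: int) -> tuple:
    # Hypothetical reconstruction: an integer scale of length num_steps centered on 0,
    # e.g. 5 -> (-2, -1, 0, 1, 2) and 6 -> (-3, -2, -1, 0, 1, 2).
    start = -(num_steps // 2)
    return tuple(range(start, start + num_steps))
```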
@@ -152,7 +144,7 @@ def update_pre_generated_images(slider_value, total_images):
 def reset_recalc_directions():
     return True
 
-# Five examples fitting the "Time Stream" theme (one Korean example included)
+# Five "Time Stream" themed examples (one Korean example included)
 examples = [
     ["신선한 토마토가 부패한 토마토로 변해가는 과정", "Fresh", "Rotten", 2.0],
     ["A blooming flower gradually withers into decay", "Bloom", "Wither", 1.5],
@@ -170,17 +162,14 @@ body {
     font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
     color: #333;
 }
-
 footer {
     visibility: hidden;
 }
-
 .container {
     max-width: 1200px;
     margin: 20px auto;
     padding: 0 10px;
 }
-
 .main-panel {
     background-color: rgba(255, 255, 255, 0.9);
     border-radius: 12px;
@@ -188,25 +177,21 @@ footer {
     margin-bottom: 20px;
     box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
 }
-
 .controls-panel {
     background-color: rgba(255, 255, 255, 0.85);
     border-radius: 8px;
     padding: 16px;
     box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.05);
 }
-
 .image-display {
     min-height: 400px;
     display: flex;
     flex-direction: column;
     justify-content: center;
 }
-
 .slider-container {
     padding: 10px 0;
 }
-
 .advanced-panel {
     margin-top: 20px;
     border-top: 1px solid #eaeaea;
@@ -215,8 +200,7 @@ footer {
 """
 
 with gr.Blocks(css=css, title="타임 스트림") as demo:
-    # Title and Description
-    gr.Markdown("# 타임 스트림\nA creative journey through the transformation of images over time.")
+    gr.Markdown("# 타임 스트림\nA creative journey through the transformation of images over time.\nWhen you input text, a video transitioning from the past to the future is generated.")
 
     x_concept_1 = gr.State("")
     x_concept_2 = gr.State("")
@@ -316,31 +300,23 @@ with gr.Blocks(css=css, title="타임 스트림") as demo:
         with gr.Column(scale=8):
             with gr.Group(elem_classes="main-panel"):
                 gr.Markdown("### Generated Results")
-                # Video output on top (bigger) and image output below
-                output_video = gr.Video(
-                    label=english_labels["Looping video"],
-                    elem_id="video",
-                    loop=True,
-                    autoplay=True,
-                    height=400
-                )
+                # Swapped order: Image strip on top, video below (video is larger)
+                image_strip = gr.Image(label="Image Strip", type="filepath", elem_id="strip", height=200)
+                output_video = gr.Video(label=english_labels["Looping video"], elem_id="video", loop=True, autoplay=True, height=600)
                 with gr.Row():
-                    with gr.Column(scale=1):
-                        post_generation_image = gr.Image(
-                            label=english_labels["Generated Images"],
-                            type="filepath",
-                            elem_id="interactive",
-                            elem_classes="image-display",
-                            height=200
-                        )
-                    with gr.Column(scale=1):
-                        post_generation_slider = gr.Slider(
-                            minimum=-10,
-                            maximum=10,
-                            value=0,
-                            step=1,
-                            label=english_labels["From 1st to 2nd direction"]
-                        )
+                    post_generation_image = gr.Image(
+                        label=english_labels["Generated Images"],
+                        type="filepath",
+                        elem_id="interactive",
+                        elem_classes="image-display"
+                    )
+                    post_generation_slider = gr.Slider(
+                        minimum=-10,
+                        maximum=10,
+                        value=0,
+                        step=1,
+                        label=english_labels["From 1st to 2nd direction"]
+                    )
 
             # Examples Section
             gr.Examples(
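The event wiring for the new components is outside this diff. A hedged sketch of how the slider presumably drives the displayed frame through `update_pre_generated_images`, whose signature appears in an earlier hunk header:

```python
# Assumed wiring (not shown in this diff): selecting one pre-generated frame
# from total_images whenever the slider value changes.
post_generation_slider.change(
    fn=update_pre_generated_images,
    inputs=[post_generation_slider, total_images],
    outputs=[post_generation_image],
)
```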
@@ -349,12 +325,12 @@ with gr.Blocks(css=css, title="타임 스트림") as demo:
                 fn=generate,
                 outputs=[
                     x_concept_1, x_concept_2, avg_diff_x,
-                    output_video,            # video output (larger)
-                    canvas,                  # image strip (below video)
-                    total_images,
-                    post_generation_image,
-                    post_generation_slider,
-                    seed
+                    output_video,            # 4th output from generate goes to output_video
+                    image_strip,             # 5th output (canvas) goes to image_strip
+                    total_images,            # 6th output (list of images)
+                    post_generation_image,   # 7th output (interactive image)
+                    post_generation_slider,  # 8th output (slider update)
+                    seed                     # 9th output (seed)
                 ],
                 cache_examples="lazy"
             )
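The comments in the new outputs list follow the return statement of generate shown in an earlier hunk; position by position the mapping is:

```python
# return x_concept_1, x_concept_2, avg_diff_x, export_to_video(images, video_path, fps=5),
#        canvas, images, images[scale_middle], post_generation_slider_update, seed
#
# 1-3. x_concept_1 / x_concept_2 / avg_diff_x        -> the cached concepts and direction
# 4.   export_to_video(images, video_path, fps=5)    -> output_video
# 5.   canvas (the 256 px image strip)               -> image_strip
# 6.   images (the full list of frames)              -> total_images
# 7.   images[scale_middle] (the center frame)       -> post_generation_image
# 8.   post_generation_slider_update (gr.update(..)) -> post_generation_slider
# 9.   seed                                          -> seed
```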
@@ -369,8 +345,8 @@ with gr.Blocks(css=css, title="타임 스트림") as demo:
         ],
         outputs=[
             x_concept_1, x_concept_2, avg_diff_x,
-            output_video,          # video output (larger)
-            canvas,                # image strip (below video)
+            output_video,          # video
+            image_strip,           # canvas
             total_images,
             post_generation_image,
             post_generation_slider,
 