ginipick committed
Commit e6d3f1b · verified · 1 Parent(s): c418ad5

Update app.py

Files changed (1)
  1. app.py +50 -22
app.py CHANGED
@@ -11,6 +11,30 @@ from PIL import Image
 from diffusers.utils import load_image
 from diffusers.utils import export_to_video
 import random
+from transformers import pipeline
+
+# Load the Korean-to-English translation model
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+
+# Korean display names for the UI labels
+korean_labels = {
+    "Prompt": "프롬프트",
+    "1st direction to steer": "첫 번째 방향",
+    "2nd direction to steer": "두 번째 방향",
+    "Strength": "강도",
+    "Generate directions": "방향 생성",
+    "Generated Images": "생성된 이미지",
+    "From 1st to 2nd direction": "첫 번째에서 두 번째 방향으로",
+    "Strip": "이미지 스트립",
+    "Looping video": "루프 비디오",
+    "Advanced options": "고급 옵션",
+    "Num of intermediate images": "중간 이미지 수",
+    "Num iterations for clip directions": "클립 방향 반복 횟수",
+    "Num inference steps": "추론 단계 수",
+    "Guidance scale": "가이던스 스케일",
+    "Randomize seed": "시드 무작위화",
+    "Seed": "시드"
+}
 
 # load pipelines
 base_model = "black-forest-labs/FLUX.1-schnell"

@@ -50,6 +74,11 @@ def convert_to_centered_scale(num):
     end = num // 2
     return tuple(range(start, end + 1))
 
+def translate_if_korean(text):
+    # Translate only when the text contains Hangul jamo or syllables
+    if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in text):
+        return translator(text)[0]['translation_text']
+    return text
+
 @spaces.GPU(duration=85)
 def generate(prompt,
              concept_1,

@@ -67,6 +96,11 @@ def generate(prompt,
              total_images=[],
              progress=gr.Progress()
              ):
+    # Translate the prompt and both concepts if they are written in Korean
+    prompt = translate_if_korean(prompt)
+    concept_1 = translate_if_korean(concept_1)
+    concept_2 = translate_if_korean(concept_2)
+
     print(f"Prompt: {prompt}, ← {concept_2}, {concept_1} ➡️ . scale {scale}, interm steps {interm_steps}")
     slider_x = [concept_2, concept_1]
     # check if avg diff for directions need to be re-calculated

@@ -117,8 +151,6 @@ def update_pre_generated_images(slider_value, total_images):
 def reset_recalc_directions():
     return True
 
-
-
 examples = [["a dog in the park", "winter", "summer", 1.5], ["a house", "USA suburb", "Europe", 2.5], ["a tomato", "rotten", "super fresh", 2.5]]
 
 css = """

@@ -127,11 +159,7 @@ footer {
 }
 """
 
-
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
-
-
-
     x_concept_1 = gr.State("")
     x_concept_2 = gr.State("")
     total_images = gr.Gallery(visible=False)

@@ -143,37 +171,37 @@ with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as demo:
     with gr.Row():
         with gr.Column():
             with gr.Group():
-                prompt = gr.Textbox(label="Prompt", info="Describe what to be steered by the directions", placeholder="A dog in the park")
+                prompt = gr.Textbox(label=korean_labels["Prompt"], info="설명할 내용을 입력하세요", placeholder="공원에 있는 강아지")
                 with gr.Row():
-                    concept_1 = gr.Textbox(label="1st direction to steer", info="Starting state", placeholder="winter")
-                    concept_2 = gr.Textbox(label="2nd direction to steer", info="Finishing state", placeholder="summer")
-                x = gr.Slider(minimum=0, value=1.75, step=0.1, maximum=4.0, label="Strength", info="maximum strength on each direction (unstable beyond 2.5)")
-                submit = gr.Button("Generate directions")
+                    concept_1 = gr.Textbox(label=korean_labels["1st direction to steer"], info="시작 상태", placeholder="겨울")
+                    concept_2 = gr.Textbox(label=korean_labels["2nd direction to steer"], info="종료 상태", placeholder="여름")
+                x = gr.Slider(minimum=0, value=1.75, step=0.1, maximum=4.0, label=korean_labels["Strength"], info="각 방향의 최대 강도 (2.5 이상은 불안정)")
+                submit = gr.Button(korean_labels["Generate directions"])
         with gr.Column():
             with gr.Group(elem_id="group"):
-                post_generation_image = gr.Image(label="Generated Images", type="filepath", elem_id="interactive")
-                post_generation_slider = gr.Slider(minimum=-10, maximum=10, value=0, step=1, label="From 1st to 2nd direction")
+                post_generation_image = gr.Image(label=korean_labels["Generated Images"], type="filepath", elem_id="interactive")
+                post_generation_slider = gr.Slider(minimum=-10, maximum=10, value=0, step=1, label=korean_labels["From 1st to 2nd direction"])
             with gr.Row():
                 with gr.Column(scale=4):
-                    image_seq = gr.Image(label="Strip", elem_id="strip", height=80)
+                    image_seq = gr.Image(label=korean_labels["Strip"], elem_id="strip", height=80)
                 with gr.Column(scale=2, min_width=100):
-                    output_image = gr.Video(label="Looping video", elem_id="video", loop=True, autoplay=True)
-    with gr.Accordion(label="Advanced options", open=False):
-        interm_steps = gr.Slider(label = "Num of intermediate images", minimum=3, value=7, maximum=65, step=2)
+                    output_image = gr.Video(label=korean_labels["Looping video"], elem_id="video", loop=True, autoplay=True)
+    with gr.Accordion(label=korean_labels["Advanced options"], open=False):
+        interm_steps = gr.Slider(label=korean_labels["Num of intermediate images"], minimum=3, value=7, maximum=65, step=2)
         with gr.Row():
-            iterations = gr.Slider(label = "Num iterations for clip directions", minimum=0, value=200, maximum=400, step=1)
-            steps = gr.Slider(label = "Num inference steps", minimum=1, value=3, maximum=4, step=1)
+            iterations = gr.Slider(label=korean_labels["Num iterations for clip directions"], minimum=0, value=200, maximum=400, step=1)
+            steps = gr.Slider(label=korean_labels["Num inference steps"], minimum=1, value=3, maximum=4, step=1)
         with gr.Row():
            guidance_scale = gr.Slider(
-                label="Guidance scale",
+                label=korean_labels["Guidance scale"],
                 minimum=0.1,
                 maximum=10.0,
                 step=0.1,
                 value=3.5,
             )
            with gr.Column():
-                randomize_seed = gr.Checkbox(True, label="Randomize seed")
-                seed = gr.Slider(minimum=0, maximum=MAX_SEED, step=1, label="Seed", interactive=True, randomize=True)
+                randomize_seed = gr.Checkbox(True, label=korean_labels["Randomize seed"])
+                seed = gr.Slider(minimum=0, maximum=MAX_SEED, step=1, label=korean_labels["Seed"], interactive=True, randomize=True)
 
     examples_gradio = gr.Examples(
         examples=examples,
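The new translate_if_korean helper only calls the Helsinki-NLP/opus-mt-ko-en pipeline when the input actually contains Hangul, i.e. a character in the compatibility jamo block (U+3131–U+3163) or the precomposed syllable block (U+AC00–U+D7A3); any other input is returned unchanged. A minimal standalone sketch of that behaviour, assuming the transformers package is installed and the checkpoint can be downloaded (the contains_hangul helper name is illustrative, not part of the commit):

from transformers import pipeline

# Korean-to-English translation model, as used in app.py
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def contains_hangul(text):
    # True if any character falls in the Hangul compatibility jamo
    # (U+3131-U+3163) or precomposed syllable (U+AC00-U+D7A3) ranges
    return any('\u3131' <= ch <= '\u3163' or '\uac00' <= ch <= '\ud7a3' for ch in text)

def translate_if_korean(text):
    if contains_hangul(text):
        return translator(text)[0]['translation_text']
    return text  # non-Korean input passes through untouched

print(translate_if_korean("공원에 있는 강아지"))   # Korean prompt, translated to English
print(translate_if_korean("a dog in the park"))   # already English, returned as-is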
 
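The korean_labels dictionary maps each original English component label to its Korean display string, and the Gradio components look the labels up by their English key. A small sketch of one way to keep an English fallback when a key is missing; the get_label helper is hypothetical and not part of this commit:

import gradio as gr

korean_labels = {"Prompt": "프롬프트", "Seed": "시드"}  # excerpt of the dict added in app.py

def get_label(name):
    # Hypothetical helper: fall back to the English key when no Korean label exists
    return korean_labels.get(name, name)

with gr.Blocks() as demo:
    prompt = gr.Textbox(label=get_label("Prompt"))                                   # "프롬프트"
    guidance = gr.Slider(0.1, 10.0, value=3.5, label=get_label("Guidance scale"))    # falls back to English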