Gianpaolo Macario committed on
Commit
307a3fb
·
1 Parent(s): ed06954

feat: refactor generate_image under module flux

Browse files
Files changed (2) hide show
  1. app.py +10 -17
  2. flux.py +96 -0
app.py CHANGED
@@ -1,6 +1,8 @@
1
  import gradio as gr
2
  import requests
3
 
 
 
4
  def greet(name: str) -> str:
5
  """
6
  Greets the user with a personalized hello message.
@@ -69,21 +71,6 @@ def calculate(n1, op, n2):
69
  if op == "/" and n2 != 0: return str(n1 / n2)
70
  return "Error"
71
 
72
def generate_image(prompt: str):
    """
    Generate an image for the given text prompt.

    Args:
        prompt (str): Text prompt describing the desired image.

    Returns:
        str: URL of the generated image.
    """
    # Placeholder implementation: the real Stable Diffusion API/model
    # call would go here. For now we hand back a fixed demo image URL.
    placeholder_url = "https://avatars.githubusercontent.com/u/75182?v=4"
    return placeholder_url
86
-
87
 
88
  with gr.Blocks() as demo:
89
  gr.Markdown(
@@ -133,11 +120,17 @@ with gr.Blocks() as demo:
133
  with tab_image:
134
  img_prompt = gr.Textbox(
135
  label="Image Prompt",
136
- placeholder="Enter a prompt for the image generation"
 
137
  )
138
  img_btn = gr.Button("Generate Image")
139
  img_output = gr.Image(label="Sample Image")
140
- img_btn.click(fn=generate_image, inputs=img_prompt, outputs=img_output, api_name="generate_image")
 
 
 
 
 
141
 
142
 
143
  demo.launch(mcp_server=True, share=True)
 
1
  import gradio as gr
2
  import requests
3
 
4
+ import flux
5
+
6
  def greet(name: str) -> str:
7
  """
8
  Greets the user with a personalized hello message.
 
71
  if op == "/" and n2 != 0: return str(n1 / n2)
72
  return "Error"
73
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
  with gr.Blocks() as demo:
76
  gr.Markdown(
 
120
  with tab_image:
121
  img_prompt = gr.Textbox(
122
  label="Image Prompt",
123
+ placeholder="Enter a prompt for the image generation",
124
+ value="A portrait of a handsome software developer"
125
  )
126
  img_btn = gr.Button("Generate Image")
127
  img_output = gr.Image(label="Sample Image")
128
+ img_btn.click(
129
+ fn=flux.generate_image,
130
+ # fn=flux.generate_image2, # DEBUG
131
+ inputs=img_prompt,
132
+ outputs=img_output,
133
+ api_name="generate_image")
134
 
135
 
136
  demo.launch(mcp_server=True, share=True)
flux.py CHANGED
@@ -8,6 +8,7 @@ import time
8
  from io import BytesIO
9
  from pathlib import Path
10
  import modal
 
11
 
12
  # We’ll make use of the full CUDA toolkit in this example, so we’ll build our container image
13
  # off of the nvidia/cuda base.
@@ -217,6 +218,101 @@ def optimize(pipe, compile=True):
217
 
218
  return pipe
219
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
220
  # To run this script, use the command:
221
  # modal run flux.py --prompt "a beautiful landscape with mountains and a river" --twice --compile
222
 
 
8
  from io import BytesIO
9
  from pathlib import Path
10
  import modal
11
+ import modal.running_app
12
 
13
  # We’ll make use of the full CUDA toolkit in this example, so we’ll build our container image
14
  # off of the nvidia/cuda base.
 
218
 
219
  return pipe
220
 
221
+
222
def generate_image(prompt: str):
    """
    Generate an image from a text prompt via the Flux model.

    Args:
        prompt (str): Text prompt describing the desired image.

    Returns:
        str: URL of the generated image.
    """
    # Demonstration stub: a fixed avatar URL stands in for real
    # Flux model output.
    demo_url = "https://avatars.githubusercontent.com/u/75182?v=4"
    return demo_url
236
+
237
+
238
# Tracks whether the Modal app has been started by this process.
# NOTE(review): this is process-local state only — it does not reflect
# the actual Modal deployment status; confirm against `modal app list`.
app_running = False


def make_sure_app_running():
    """
    Ensure the Modal app is running, starting it if necessary.

    NOTE: WORK-IN-PROGRESS — the "is it running?" check is just the
    module-level ``app_running`` flag, not a real query of Modal state.

    Side effects:
        May block in ``app.run(...)`` until the app stops, and mutates
        the module-level ``app_running`` flag.
    """
    global app_running
    if app_running:
        # Nothing to do; the flag says we already started the app.
        print("✅ The Modal app is already running. You can call the generate_image function to generate images.")
        return

    print("⚠️ The Modal app is not running. Should start the app first.")
    print("Starting Modal app...")
    # NOTE(review): `detach=False` makes this a blocking call that only
    # returns once the app stops, so the success messages below are
    # printed after the run ends — confirm whether a detached run
    # (e.g. `modal run --detach`) was intended instead.
    app.run(detach=False, interactive=True)
    print("✅ Modal app started successfully.")
    app_running = True
    print("You can now call the generate_image function to generate images.")
    print("Example usage:")
    print("generate_image(prompt='a beautiful landscape with mountains and a river', twice=True, compile=False)")
267
+
268
+
269
def generate_image2(
    prompt: str = """
    A portrait of a handsome software developer
    """,
    twice: bool = True,
    compile: bool = False,
):
    """
    Generates an image based on a text prompt using the Flux model
    running on Modal serverless infrastructure.

    Args:
        prompt (str): The text prompt to generate the image from.
        twice (bool): Whether to run the inference twice (the second
            call measures warm latency, after the first absorbed any
            cold-start cost).
        compile (bool): Whether to compile the model.

    Returns:
        pathlib.Path: Path of the generated JPEG saved under
        /tmp/flux/output.jpg — a local file path, not a URL.
    """

    print("DEBUG: generate_image2 called with parameters:")
    print(f" prompt: {prompt}")
    print(f" twice: {twice}")
    print(f" compile: {compile}")
    print("DEBUG: Starting image generation...")

    # Ensure the Modal app is running before making any remote calls.
    make_sure_app_running()

    # First call: latency includes any container start-up / model load.
    t0 = time.time()
    image_bytes = Model(compile=compile).inference.remote(prompt)
    print(f"🎨 first inference latency: {time.time() - t0:.2f} seconds")

    if twice:
        # Second call: measures warm, steady-state inference latency.
        t0 = time.time()
        image_bytes = Model(compile=compile).inference.remote(prompt)
        print(f"🎨 second inference latency: {time.time() - t0:.2f} seconds")

    output_path = Path("/tmp") / "flux" / "output.jpg"
    output_path.parent.mkdir(exist_ok=True, parents=True)
    print(f"🎨 saving output to {output_path}")
    output_path.write_bytes(image_bytes)

    print(f"✅ Image generated and saved to {output_path}")
    return output_path
314
+
315
+
316
  # To run this script, use the command:
317
  # modal run flux.py --prompt "a beautiful landscape with mountains and a river" --twice --compile
318