Gianpaolo Macario committed
Commit 0e3ba22 · 1 Parent(s): b4e3e17

feat(app): flux.generate_image2 calls Modal infra

Files changed (2)
  1. app.py +3 -2
  2. flux.py +64 -62
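
In short, the commit retires the placeholder-only handler and points the Gradio button at flux.generate_image2, which forwards the prompt to a Modal function for remote inference. A minimal sketch of that calling pattern follows; the module and function names here are illustrative, not the actual flux.py contents:

# Minimal sketch of calling a Modal function from a local process, as
# generate_image2 does after this commit. Names are illustrative only.
import modal

app = modal.App("example-image-app")

@app.function()
def render(prompt: str) -> bytes:
    # On Modal this would run the Flux pipeline; here it just echoes the prompt.
    return prompt.encode()

def render_from_local(prompt: str) -> bytes:
    # app.run() starts an ephemeral Modal app from the local process;
    # .remote() then executes the function on Modal's infrastructure and
    # blocks until the result comes back.
    with app.run():
        return render.remote(prompt)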
app.py CHANGED
@@ -80,6 +80,8 @@ with gr.Blocks() as demo:
 
     This app provides a simple calculator, a dad joke generator, a greeting function, and an image generation feature.
 
+    Read the [project documentation](https://huggingface.co/spaces/Agents-MCP-Hackathon/simple-calculator/blob/main/README.md) to understand its background and motivations.
+
     Use via API or MCP 🚀 · [Powered by Modal](https://modal.com/) · [Built with Gradio](https://www.gradio.app/) 🟧
     """
     )
@@ -126,8 +128,7 @@ with gr.Blocks() as demo:
     img_btn = gr.Button("Generate Image")
     img_output = gr.Image(label="Sample Image")
     img_btn.click(
-        fn=flux.generate_image,
-        # fn=flux.generate_image2,  # DEBUG
+        fn=flux.generate_image2,  # Choose 1 or 2
         inputs=img_prompt,
         outputs=img_output,
         api_name="generate_image")
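
Because the click handler keeps api_name="generate_image", the endpoint remains callable over the Gradio API. A hedged usage sketch with gradio_client; the Space id is taken from the README link above, and whether the Space is currently reachable is an assumption:

from gradio_client import Client

# Space id taken from the README link in the diff above; illustrative only.
client = Client("Agents-MCP-Hackathon/simple-calculator")
result = client.predict(
    "a beautiful landscape with mountains and a river",  # img_prompt
    api_name="/generate_image",
)
print(result)  # typically a local path to the downloaded image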
flux.py CHANGED
@@ -219,57 +219,9 @@ def optimize(pipe, compile=True):
     return pipe
 
 
-def generate_image(prompt: str):
-    """
-    Generates an image based on a text prompt using the Flux model.
-
-    Args:
-        prompt (str): The text prompt to generate the image from.
-
-    Returns:
-        str: The URL of the generated image.
-    """
-
-    # For demonstration, we'll return a placeholder image URL
-
-    return "https://avatars.githubusercontent.com/u/75182?v=4"
-
-
-app_running = False
-
-def make_sure_app_running():
-    """
-    Ensure the Modal app is running.
-
-    NOTE: WORK-IN-PROGRESS
-    """
-    global app_running
-    # In practice, you would implement logic to check if the Modal app is running.
-    # For example, you could use `modal app list` to check if the app is running.
-    if not app_running:
-        print("⚠️ The Modal app is not running. Should start the app first.")
-        print("Starting Modal app...")
-        # Start the Modal app.
-        # This will run the app in the background and allow it to handle requests.
-        # Note: This is a blocking call, so it will not return until the app is stopped.
-        # If you want to run it in the background, you can use `modal run --detach`.
-        # For example:
-        # modal run flux.py --prompt "a beautiful landscape with mountains and a river" --twice --compile
-        app.run(detach=False, interactive=True)
-        print("✅ Modal app started successfully.")
-        app_running = True
-        print("You can now call the generate_image function to generate images.")
-        print("Example usage:")
-        print("generate_image(prompt='a beautiful landscape with mountains and a river', twice=True, compile=False)")
-    else:
-        print("✅ The Modal app is already running. You can call the generate_image function to generate images.")
-    pass
-
-
-def generate_image2(
-    prompt: str = """
-    A portrait of a handsome software developer
-    """,
+@app.function()
+def generate_image(
+    prompt: str = "Question Mark",
     twice: bool = True,
     compile: bool = False,
 ):
@@ -283,18 +235,15 @@ def generate_image2(
         compile (bool): Whether to compile the model.
 
     Returns:
-        str: The URL of the generated image.
+        A bytes object containing the generated image.
     """
 
-    print("DEBUG: generate_image2 called with parameters:")
+    print("DEBUG: generate_image called with parameters:")
     print(f"  prompt: {prompt}")
     print(f"  twice: {twice}")
     print(f"  compile: {compile}")
     print("DEBUG: Starting image generation...")
 
-    # Ensure the Modal app is running before making any calls
-    make_sure_app_running()
-
     t0 = time.time()
     image_bytes = Model(compile=compile).inference.remote(prompt)
     print(f"🎨 first inference latency: {time.time() - t0:.2f} seconds")
@@ -304,13 +253,66 @@
     image_bytes = Model(compile=compile).inference.remote(prompt)
     print(f"🎨 second inference latency: {time.time() - t0:.2f} seconds")
 
-    output_path = Path("/tmp") / "flux" / "output.jpg"
-    output_path.parent.mkdir(exist_ok=True, parents=True)
-    print(f"🎨 saving output to {output_path}")
-    output_path.write_bytes(image_bytes)
+    print(f"DEBUG: Image generation completed - {len(image_bytes)} bytes")
+    return image_bytes
 
-    print(f"✅ Image generated and saved to {output_path}")
-    return output_path
+
+def generate_image1(prompt: str):
+    """
+    Generates an image based on a text prompt using the Flux model.
+    For demonstration, this returns a placeholder image URL.
+
+    Args:
+        prompt (str): The text prompt to generate the image from.
+
+    Returns:
+        str: The URL of the generated image.
+    """
+    return "https://avatars.githubusercontent.com/u/75182?v=4"
+
+
+def generate_image2(prompt: str):
+    """
+    Generates an image based on a text prompt using the Flux model
+    running on Modal serverless infrastructure.
+
+    Args:
+        prompt (str): The text prompt to generate the image from.
+
+    Returns:
+        str | Path: The URL of the placeholder image, or the local path of the generated image.
+    """
+
+    print("DEBUG: generate_image2 called with prompt:", prompt)
+
+    if prompt is None or prompt.strip() == "A portrait of a handsome software developer":
+        print("DEBUG: Returning hardcoded image URL for default prompt")
+        result = generate_image1(prompt)
+    else:
+        # Call the generate_image function in the Modal app context.
+        # This ensures that the function is executed in the Modal environment
+        # and can access the necessary resources and configurations.
+        print("DEBUG: Calling generate_image.remote with prompt:", prompt)
+        with app.run():
+            print("DEBUG: Running in Modal app context")
+            # This returns the raw bytes of the generated image.
+            # Note: This is a blocking call, so it will wait for the image generation to complete.
+            image_bytes = generate_image.remote(prompt=prompt)
+            # .remote() dispatches the call to Modal's serverless infrastructure
+            # and blocks until the result is available, so image_bytes holds the
+            # finished image rather than a future object that would need to be
+            # awaited separately.
+
+        output_path = Path("/tmp") / "flux2" / "output.jpg"
+        output_path.parent.mkdir(exist_ok=True, parents=True)
+        print(f"🎨 Writing {len(image_bytes)} bytes to {output_path}")
+        output_path.write_bytes(image_bytes)
+
+        print(f"✅ Image generated and saved to {output_path}")
+        result = output_path
+
+    print(f"DEBUG: Image generation completed, returning result: {result}")
+    return result
 
 
 # To run this script, use the command:
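
For a quick local check of the new wiring outside Gradio, generate_image2 can also be called directly. The sketch below uses the example prompt from the removed make_sure_app_running comments and assumes Modal credentials are already configured:

import flux

# Non-default prompt, so the Modal path (app.run() + generate_image.remote) is exercised.
result = flux.generate_image2("a beautiful landscape with mountains and a river")
print(result)  # expected: /tmp/flux2/output.jpg (or a placeholder URL for the default prompt)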