Ryukijano committed on
Commit ff06f7f · verified · 1 Parent(s): e5750a9

Update app.py

Files changed (1):
  1. app.py +6 -8
app.py CHANGED
@@ -5,10 +5,8 @@ torch.backends.cudnn.allow_tf32 = True
 import gradio as gr
 import numpy as np
 import random
-import spaces
 import time
 from diffusers import DiffusionPipeline, AutoencoderTiny
-from diffusers.models.attention_processor import AttnProcessor2_0
 from custom_pipeline import FluxWithCFGPipeline
 import asyncio
 
@@ -51,8 +49,9 @@ if hasattr(pipe, "transformer") and torch.cuda.is_available():
 
 torch.cuda.empty_cache()
 
-# Inference function
-@spaces.GPU(duration=25)
+
+
+# Inference function (async)
 async def generate_image(
     prompt,
     seed=24,
@@ -119,8 +118,8 @@ async def generate_image(
         static_latents_out, height, width, "pil"
     )
 
-    # Graph-based generation function
-    async def generate_with_graph(
+    # Graph-based generation function (synchronous)
+    def generate_with_graph(
         latents,
         prompt_embeds,
         pooled_prompt_embeds,
@@ -137,8 +136,7 @@ async def generate_image(
         g.replay()
         return static_output
 
-    # Only generate the last image in the sequence
-    img = await pipe.generate_images(
+    img = pipe.generate_images(
         prompt=prompt,
         width=width,
         height=height,
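For readers unfamiliar with the pattern, the `static_latents_out` / `g.replay()` / `static_output` names in the diff match PyTorch's CUDA Graphs capture-and-replay idiom, which also explains why the commit turns `generate_with_graph` into a plain `def`: replaying a captured graph is an ordinary blocking call with nothing to await. Below is a minimal, self-contained sketch of that idiom; the `step` function and tensor shapes are placeholders, not the Space's actual code, and a CUDA device is required.

import torch

# Sketch of the CUDA Graphs capture/replay idiom that the commit's
# generate_with_graph helper appears to follow. `step` is a placeholder
# for the pipeline's per-call GPU work (e.g. a denoising step).
def step(x: torch.Tensor) -> torch.Tensor:
    return x * 2.0 + 1.0  # stand-in for real GPU work

static_input = torch.zeros(4, device="cuda")

# Warm up on a side stream before capture, as the PyTorch docs recommend.
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
    static_output = step(static_input)
torch.cuda.current_stream().wait_stream(s)

# Capture one iteration into a graph; static_output becomes the
# fixed output buffer of the captured graph.
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
    static_output = step(static_input)

def generate_with_graph(x: torch.Tensor) -> torch.Tensor:
    # Copy fresh inputs into the captured buffer, replay, read the result.
    static_input.copy_(x)
    g.replay()  # ordinary blocking call: nothing here is awaitable
    return static_output.clone()

print(generate_with_graph(torch.ones(4, device="cuda")))  # tensor([3., 3., 3., 3.])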
 
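Similarly, the last hunk drops the `await` on `pipe.generate_images(...)`. Assuming that method is a plain synchronous method of `FluxWithCFGPipeline` (its definition lives in custom_pipeline.py and is not shown in this diff), awaiting its return value would fail at runtime. A small illustration with a stubbed pipeline; everything here is hypothetical except the names taken from the diff:

import asyncio

class FluxWithCFGPipeline:
    # Stub: the real class is defined in custom_pipeline.py and is not
    # shown in the diff; only the method name is taken from it.
    def generate_images(self, prompt, width, height):
        return f"image<{prompt}, {width}x{height}>"

pipe = FluxWithCFGPipeline()

async def generate_image(prompt, width=1024, height=1024):
    # The method is synchronous, so it is called without `await`;
    # `await pipe.generate_images(...)` would raise
    # "TypeError: object str can't be used in 'await' expression".
    return pipe.generate_images(prompt, width=width, height=height)

print(asyncio.run(generate_image("a photo of a fox")))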