NightRaven109 committed
Commit f6b2853 · verified · 1 Parent(s): 339c42a

Update app.py

Files changed (1)
  1. app.py +29 -46
app.py CHANGED
@@ -18,6 +18,7 @@ class Args:
     def __init__(self, **kwargs):
         self.__dict__.update(kwargs)
 
+# Initialize models at startup
 @spaces.GPU
 def initialize_models():
     global pipeline, generator, accelerator
@@ -49,24 +50,27 @@ def initialize_models():
         # Load pipeline
         pipeline = load_pipeline(args, accelerator, enable_xformers_memory_efficient_attention=False)
 
-        # Ensure all models are in eval mode
+        # Ensure all models are in eval mode and on CUDA
+        pipeline = pipeline.to("cuda")
         pipeline.unet.eval()
         pipeline.controlnet.eval()
         pipeline.vae.eval()
         pipeline.text_encoder.eval()
 
-        # Move pipeline to CUDA
-        pipeline = pipeline.to("cuda")
-
         # Initialize generator
         generator = torch.Generator("cuda")
 
+        print("Models initialized and ready!")
        return True
 
     except Exception as e:
         print(f"Error initializing models: {str(e)}")
         return False
 
+# Load models at module level
+print("Initializing models...")
+initialize_models()
+
 @spaces.GPU(processing_timeout=180)
 def process_image(
     input_image,
@@ -79,14 +83,9 @@ def process_image(
     upscale_factor=4,
     color_fix_method="adain"
 ):
-    global pipeline, generator, accelerator
+    global pipeline, generator
 
     try:
-        # Initialize models if not already done
-        if pipeline is None:
-            if not initialize_models():
-                return None
-
         # Create args object with all necessary parameters
         args = Args(
             added_prompt=prompt,
@@ -105,7 +104,7 @@ def process_image(
             tile_diffusion_stride=None,
             start_steps=999,
             start_point='lr',
-            use_vae_encode_condition=True, # Changed to True
+            use_vae_encode_condition=True,
             sample_times=1
         )
 
@@ -128,42 +127,27 @@ def process_image(
         validation_image = validation_image.resize((validation_image.size[0]//8*8, validation_image.size[1]//8*8))
         width, height = validation_image.size
 
-        # Ensure pipeline is on CUDA and in eval mode
-        pipeline = pipeline.to("cuda")
-        pipeline.unet.eval()
-        pipeline.controlnet.eval()
-        pipeline.vae.eval()
-        pipeline.text_encoder.eval()
-
         # Generate image
         with torch.no_grad():
-            try:
-                # First encode the image with VAE
-                image_tensor = pipeline.image_processor.preprocess(validation_image)
-                image_tensor = image_tensor.unsqueeze(0).to(device="cuda", dtype=torch.float32)
-
-                inference_time, output = pipeline(
-                    args.t_max,
-                    args.t_min,
-                    args.tile_diffusion,
-                    args.tile_diffusion_size,
-                    args.tile_diffusion_stride,
-                    args.added_prompt,
-                    validation_image,
-                    num_inference_steps=args.num_inference_steps,
-                    generator=generator,
-                    height=height,
-                    width=width,
-                    guidance_scale=args.guidance_scale,
-                    negative_prompt=args.negative_prompt,
-                    conditioning_scale=args.conditioning_scale,
-                    start_steps=args.start_steps,
-                    start_point=args.start_point,
-                    use_vae_encode_condition=True, # Set to True
-                )
-            except Exception as e:
-                print(f"Pipeline execution error: {str(e)}")
-                raise
+            inference_time, output = pipeline(
+                args.t_max,
+                args.t_min,
+                args.tile_diffusion,
+                args.tile_diffusion_size,
+                args.tile_diffusion_stride,
+                args.added_prompt,
+                validation_image,
+                num_inference_steps=args.num_inference_steps,
+                generator=generator,
+                height=height,
+                width=width,
+                guidance_scale=args.guidance_scale,
+                negative_prompt=args.negative_prompt,
+                conditioning_scale=args.conditioning_scale,
+                start_steps=args.start_steps,
+                start_point=args.start_point,
+                use_vae_encode_condition=True,
+            )
 
         image = output.images[0]
 
@@ -184,7 +168,6 @@ def process_image(
         traceback.print_exc()
         return None
 
-
 # Define default values
 DEFAULT_VALUES = {
     "prompt": "clean, texture, high-resolution, 8k",