Borcherding committed
Commit bc71736 · verified · 1 parent: d071a59

Update app.py

Files changed (1): app.py (+39, -14)
app.py CHANGED
@@ -17,13 +17,22 @@ pipe = FluxControlPipeline.from_pretrained(
 )
 processor = DepthPreprocessor.from_pretrained("LiheYoung/depth-anything-large-hf")
 
+def cleanup_memory():
+    """Clean up GPU memory"""
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
+
 @spaces.GPU
 def load_lora(lora_path):
     if not lora_path.strip():
         return "Please provide a valid LoRA path"
     try:
+        cleanup_memory()
+
         # Move to GPU within the wrapped function
         pipe.to("cuda")
+        pipe.enable_model_cpu_offload()
 
         # Unload any existing LoRA weights first
         try:
@@ -35,25 +44,38 @@ def load_lora(lora_path):
         pipe.load_lora_weights(lora_path)
         return f"Successfully loaded LoRA weights from {lora_path}"
     except Exception as e:
+        cleanup_memory()
         return f"Error loading LoRA weights: {str(e)}"
 
 @spaces.GPU
 def unload_lora():
     try:
+        cleanup_memory()
         pipe.to("cuda")
         pipe.unload_lora_weights()
         return "Successfully unloaded LoRA weights"
     except Exception as e:
+        cleanup_memory()
         return f"Error unloading LoRA weights: {str(e)}"
 
+def round_to_multiple(number, multiple):
+    """Round a number to the nearest multiple"""
+    return multiple * round(number / multiple)
+
 @spaces.GPU
 def infer(control_image, prompt, seed=42, randomize_seed=False, width=1024, height=1024,
           guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
 
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
     try:
+        cleanup_memory()
+
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+
+        # Ensure dimensions are divisible by 16
+        width = round_to_multiple(width, 16)
+        height = round_to_multiple(height, 16)
+
         # Move pipeline to GPU within the wrapped function
         pipe.to("cuda")
 
@@ -61,18 +83,21 @@ def infer(control_image, prompt, seed=42, randomize_seed=False, width=1024, heig
         control_image = processor(control_image)[0].convert("RGB")
 
         # Generate image
-        image = pipe(
-            prompt=prompt,
-            control_image=control_image,
-            height=height,
-            width=width,
-            num_inference_steps=num_inference_steps,
-            guidance_scale=guidance_scale,
-            generator=torch.Generator("cuda").manual_seed(seed),
-        ).images[0]
+        with torch.inference_mode():
+            image = pipe(
+                prompt=prompt,
+                control_image=control_image,
+                height=height,
+                width=width,
+                num_inference_steps=num_inference_steps,
+                guidance_scale=guidance_scale,
+                generator=torch.Generator("cuda").manual_seed(seed),
+            ).images[0]
 
+        cleanup_memory()
         return image, seed
     except Exception as e:
+        cleanup_memory()
         return None, f"Error during inference: {str(e)}"
 
 css="""
@@ -131,7 +156,7 @@ with gr.Blocks(css=css) as demo:
             label="Width",
             minimum=256,
             maximum=MAX_IMAGE_SIZE,
-            step=32,
+            step=16,  # Changed to 16 to ensure divisibility
             value=1024,
         )
 
@@ -139,7 +164,7 @@ with gr.Blocks(css=css) as demo:
             label="Height",
             minimum=256,
             maximum=MAX_IMAGE_SIZE,
-            step=32,
+            step=16,  # Changed to 16 to ensure divisibility
            value=1024,
        )
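
A few notes on the changes, with illustrative sketches (none of this code is part of the Space itself). First, the new cleanup_memory() helper: torch.cuda.empty_cache() returns the caching allocator's unused blocks to the driver, and torch.cuda.ipc_collect() releases memory held for CUDA inter-process handles. A minimal way to observe the effect on a CUDA machine:

import torch

def cleanup_memory():
    """Clean up GPU memory"""
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

if torch.cuda.is_available():
    x = torch.empty(1024, 1024, 256, device="cuda")  # ~1 GiB allocation
    del x                                  # freed to PyTorch's caching allocator, still reserved
    print(torch.cuda.memory_reserved())    # large: the cache still holds the block
    cleanup_memory()
    print(torch.cuda.memory_reserved())    # drops: cached blocks returned to the driver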
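The commit also calls pipe.enable_model_cpu_offload() after pipe.to("cuda"). In diffusers, this hook moves each submodule to the GPU only while it executes and back to CPU afterwards, and it is normally used instead of a blanket .to("cuda"), since accelerate then owns device placement. A standalone sketch of the usual pattern; the checkpoint name is an assumption (the Space's from_pretrained call is outside this diff), taken from the diffusers depth-control example this app mirrors:

import torch
from diffusers import FluxControlPipeline

# Assumed checkpoint; the one actually used by the Space is not shown in this diff.
pipe = FluxControlPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Depth-dev", torch_dtype=torch.bfloat16
)
pipe.enable_model_cpu_offload()  # manages GPU placement itself; no pipe.to("cuda") needed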
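round_to_multiple() snaps the requested dimensions to multiples of 16 before they reach the pipeline; FLUX pipelines expect height and width divisible by 16 (the 8x VAE downscale plus 2x2 latent packing), which is also why the sliders' step changed from 32 to 16: finer control while staying divisible. A quick standalone check of the helper:

def round_to_multiple(number, multiple):
    """Round a number to the nearest multiple"""
    return multiple * round(number / multiple)

for w in (1010, 1023, 1024, 1920):
    print(w, "->", round_to_multiple(w, 16))
# 1010 -> 1008, 1023 -> 1024, 1024 -> 1024, 1920 -> 1920
# Caveat: Python's round() ties to even, so an exact half such as 1000
# (1000 / 16 == 62.5) snaps down to 992 rather than up to 1008.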
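Finally, the generation call is now wrapped in torch.inference_mode(), which disables autograd tracking for everything inside the block, saving memory and some overhead during sampling. A tiny CPU-only illustration:

import torch

x = torch.ones(3, requires_grad=True)
with torch.inference_mode():
    y = x * 2                 # no autograd graph is recorded here
print(y.requires_grad)        # False
print(torch.is_inference(y))  # True: y was created in inference mode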