erwold committed · fb9dbfc
1 Parent(s): 7ef91cf
Initial Commit
app.py
CHANGED
@@ -8,6 +8,18 @@ from flux.transformer_flux import FluxTransformer2DModel
 from flux.pipeline_flux_chameleon import FluxPipeline
 import torch.nn as nn
 import math
+import logging
+import sys
+
+# Set up logging
+logging.basicConfig(
+    level=logging.INFO,
+    format='%(asctime)s - %(levelname)s - %(message)s',
+    handlers=[
+        logging.StreamHandler(sys.stdout)
+    ]
+)
+logger = logging.getLogger(__name__)
 
 MODEL_ID = "Djrango/Qwen2vl-Flux"
 
@@ -177,40 +189,40 @@ class FluxInterface:
 
     def generate(self, input_image, prompt="", guidance_scale=3.5, num_inference_steps=28, num_images=2, seed=None, aspect_ratio="1:1"):
         try:
-
+            logger.info(f"Starting generation with prompt: {prompt}, guidance_scale: {guidance_scale}, steps: {num_inference_steps}")
 
             if input_image is None:
                 raise ValueError("No input image provided")
 
             if seed is not None:
                 torch.manual_seed(seed)
-
+                logger.info(f"Set random seed to: {seed}")
 
             self.load_models()
-
+            logger.info("Models loaded successfully")
 
             # Get dimensions from aspect ratio
             if aspect_ratio not in ASPECT_RATIOS:
                 raise ValueError(f"Invalid aspect ratio. Choose from {list(ASPECT_RATIOS.keys())}")
             width, height = ASPECT_RATIOS[aspect_ratio]
-
+            logger.info(f"Using dimensions: {width}x{height}")
 
             # Process input image
            try:
                 input_image = self.resize_image(input_image)
-
+                logger.info(f"Input image resized to: {input_image.size}")
                 qwen2_hidden_state, image_grid_thw = self.process_image(input_image)
-
+                logger.info("Input image processed successfully")
             except Exception as e:
                 raise RuntimeError(f"Error processing input image: {str(e)}")
 
             try:
                 pooled_prompt_embeds = self.compute_text_embeddings("")
-
+                logger.info("Base text embeddings computed")
 
                 # Get T5 embeddings if prompt is provided
                 t5_prompt_embeds = self.compute_t5_text_embeddings(prompt)
-
+                logger.info("T5 prompt embeddings computed")
             except Exception as e:
                 raise RuntimeError(f"Error computing embeddings: {str(e)}")
 
@@ -226,12 +238,12 @@ class FluxInterface:
                     width=width,
                 ).images
 
-
+                logger.info("Images generated successfully")
                return output_images
             except Exception as e:
                 raise RuntimeError(f"Error generating images: {str(e)}")
         except Exception as e:
-
+            logger.error(f"Error during generation: {str(e)}")
             raise gr.Error(f"Generation failed: {str(e)}")
 
 # Initialize the interface
@@ -364,13 +376,25 @@ with gr.Blocks(
     # Set up the generation function
     def generate_with_error_handling(*args):
         try:
+            logger.info("Starting image generation with args: %s", str(args))
+
+            # Validate input parameters
+            input_image, prompt, guidance, steps, num_images, seed, aspect_ratio = args
+            logger.info(f"Input validation - Image: {type(input_image)}, Prompt: '{prompt}', "
+                        f"Guidance: {guidance}, Steps: {steps}, Num Images: {num_images}, "
+                        f"Seed: {seed}, Aspect Ratio: {aspect_ratio}")
+
+            if input_image is None:
+                raise ValueError("No input image provided")
+
             gr.Info("Starting image generation...")
             results = interface.generate(*args)
+            logger.info("Generation completed successfully")
             gr.Info("Generation complete!")
             return [results, None]
         except Exception as e:
             error_msg = str(e)
-
+            logger.error(f"Error in generate_with_error_handling: {error_msg}", exc_info=True)
             return [None, error_msg]
 
     submit_btn.click(
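For context, the pattern this commit introduces is the standard-library idiom of configuring logging once at import time and pairing each user-facing error with a logged traceback. The sketch below is a minimal standalone illustration of that idiom; run_generation and its placeholder output are hypothetical stand-ins for the Space's generate_with_error_handling, not code from this repository.

import logging
import sys

# Same configuration as the commit: INFO level, timestamped format,
# and a single handler streaming to stdout (visible in Space logs).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger(__name__)


def run_generation(prompt, seed=None):
    """Illustrative stand-in: log progress, catch any exception,
    and return a (result, error) pair instead of raising."""
    try:
        logger.info("Starting generation with prompt: %s, seed: %s", prompt, seed)
        if not prompt:
            raise ValueError("No prompt provided")
        result = f"image-for:{prompt}"  # placeholder for pipeline output
        logger.info("Generation completed successfully")
        return [result, None]
    except Exception as e:
        # exc_info=True attaches the traceback, as in the commit's error path
        logger.error("Error in run_generation: %s", e, exc_info=True)
        return [None, str(e)]


if __name__ == "__main__":
    print(run_generation("a cat wearing sunglasses"))
    print(run_generation(""))  # exercises the error path

Returning a [result, error] pair rather than re-raising mirrors how the commit's Gradio handler surfaces failures to the UI while still writing the full traceback to stdout.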