DonImages committed (verified)
Commit 94d851c · 1 Parent(s): 752c604

Rename appCODE.py to app.py

Files changed (1)
  1. appCODE.py → app.py +8 -12
appCODE.py → app.py RENAMED
@@ -24,22 +24,21 @@ pipeline = StableDiffusion3Pipeline.from_pretrained(
 ).to(device)
 
 # Load the LoRA trained weights once at the start
-lora_path = "lora_trained_model.pt"  # Ensure this file is uploaded in the Space
+lora_path = "lora_trained_model.safetensors"  # Ensure this file is uploaded in the Space
 if os.path.exists(lora_path):
     try:
-        # Check if LoRA is supported by your pipeline's version
-        if hasattr(pipeline, 'load_lora_weights'):
-            pipeline.load_lora_weights(lora_path)  # This automatically applies to the right components
-            print("✅ LoRA weights loaded successfully!")
-        else:
-            print("❌ LoRA weights method not available. Manually loading weights.")
-            # Optionally, you can manually load the weights using keys (refer to your printed keys)
-            # Example: pipeline.model.load_state_dict(torch.load(lora_path))
+        SD3LoraLoaderMixin.load_lora_into_model(pipeline, lora_path)
+        print("✅ LoRA weights loaded successfully!")
     except Exception as e:
         print(f"❌ Error loading LoRA: {e}")
 else:
     print("⚠️ LoRA file not found! Running base model.")
 
+# Verify if LoRA is applied
+for name, param in pipeline.text_encoder.named_parameters():
+    if "lora" in name.lower():
+        print(f"LoRA applied to: {name}, requires_grad={param.requires_grad}")
+
 # Ensure GPU allocation in Hugging Face Spaces
 @spaces.GPU(duration=65)
 def generate_image(prompt: str, seed: int = None):
@@ -66,6 +65,3 @@ with gr.Blocks() as demo:
     output_image = gr.Image(label="Generated Image")
 
     generate_btn.click(generate_image, inputs=[prompt_input, seed_input], outputs=output_image)
-
-# Launch Gradio App
-demo.launch()
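For context, the removed branch already used the instance-level `load_lora_weights` helper, which `StableDiffusion3Pipeline` inherits from `SD3LoraLoaderMixin`; whether the classmethod-style call added here (`SD3LoraLoaderMixin.load_lora_into_model(pipeline, lora_path)`) accepts a raw file path depends on the installed diffusers version. Below is a minimal sketch of the instance-level approach; it is not the committed app.py, and the base-model id and dtype are placeholders rather than values taken from this Space.

```python
# Sketch only – not the committed app.py. Assumes a diffusers build with SD3 support;
# the model id below is a placeholder and must match whatever the Space actually uses.
import os
import torch
from diffusers import StableDiffusion3Pipeline

device = "cuda" if torch.cuda.is_available() else "cpu"

pipeline = StableDiffusion3Pipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",  # placeholder base model
    torch_dtype=torch.float16,
).to(device)

lora_path = "lora_trained_model.safetensors"
if os.path.exists(lora_path):
    try:
        # Instance-level loader inherited from SD3LoraLoaderMixin; it routes the
        # LoRA layers to the matching pipeline components (transformer / text encoders).
        pipeline.load_lora_weights(lora_path)
        print("✅ LoRA weights loaded successfully!")
    except Exception as e:
        print(f"❌ Error loading LoRA: {e}")
else:
    print("⚠️ LoRA file not found! Running base model.")
```

Note that SD3 LoRAs frequently target only the transformer, so the verification loop over `pipeline.text_encoder` added in this commit may print nothing even when the LoRA loaded correctly.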