DonImages committed
Commit 9024a26 · verified · 1 Parent(s): 6ee05ba

Update app.py

Files changed (1)
  1. app.py +23 -35
app.py CHANGED
@@ -4,54 +4,42 @@ from huggingface_hub import login
 import os
 import gradio as gr
 
-# Retrieve Hugging Face token
-token = os.getenv("HF_TOKEN")
+# Retrieve the token from the environment variable
+token = os.getenv("HF_TOKEN")  # Hugging Face token from the secret
 if token:
-    login(token=token)
+    login(token=token)  # Log in with the retrieved token
 else:
-    raise ValueError("Hugging Face token not found. Please set it as a repository secret.")
+    raise ValueError("Hugging Face token not found. Please set it as a repository secret in the Space settings.")
 
 # Load the Stable Diffusion 3.5 model
-model_id = "stabilityai/stable-diffusion-3.5-large"
-pipe = StableDiffusion3Pipeline.from_pretrained(
-    model_id,
-    torch_dtype=torch.float16,
-    low_cpu_mem_usage=True,
-    device_map="balanced"
-)
-
-# Enable attention slicing for reduced memory usage
-pipe.enable_attention_slicing()
+model_id = "stabilityai/stable-diffusion-3.5-medium"
+pipe = StableDiffusion3Pipeline.from_pretrained(model_id, torch_dtype=torch.float16)
+pipe.to("cuda")
 
 # Define the path to the LoRA model
-lora_model_path = "./lora_model.pth"
+lora_model_path = "https://huggingface.co/spaces/DonImages/Testing2/resolve/main/lora_model.pth"  # LoRA model path
 
-# Load and apply the LoRA weights
+# Custom method to load and apply LoRA weights to the Stable Diffusion pipeline
 def load_lora_model(pipe, lora_model_path):
+    # Load the LoRA weights (assuming it's a PyTorch .pth file)
     lora_weights = torch.load(lora_model_path, map_location="cuda")
-    pipe.unet.load_attn_procs(lora_weights)
-    return pipe
+
+    # Modify this section based on how LoRA is intended to interact with your Stable Diffusion model
+    # Here, we just load the weights into the model's parameters (this is a conceptual approach)
+    for name, param in pipe.named_parameters():
+        if name in lora_weights:
+            param.data += lora_weights[name]  # Apply LoRA weights to the parameters
+
+    return pipe  # Return the updated model
 
+# Load and apply the LoRA model weights
 pipe = load_lora_model(pipe, lora_model_path)
 
-# Generate image function
-def generate_image(prompt, steps, scale):
-    with torch.inference_mode():  # Avoid gradient computation for inference
-        image = pipe(prompt, num_inference_steps=steps, guidance_scale=scale).images[0]
+# Function to generate an image from a text prompt
+def generate_image(prompt):
+    image = pipe(prompt).images[0]
     return image
 
 # Gradio interface
-iface = gr.Interface(
-    fn=generate_image,
-    inputs=[
-        gr.Textbox(label="Enter your prompt"),
-        gr.Slider(10, 50, step=1, value=30, label="Number of Inference Steps"),
-        gr.Slider(1.0, 20.0, step=0.5, value=7.5, label="Guidance Scale"),
-    ],
-    outputs="image",
-    title="Optimized Stable Diffusion with LoRA",
-    description="Generate images using Stable Diffusion 3.5 with optimized memory usage."
-)
-
-# Launch the Gradio interface
+iface = gr.Interface(fn=generate_image, inputs="text", outputs="image")
 iface.launch()
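
Note on the new lora_model_path: torch.load expects a local file path or file object, not an HTTP URL, so the weights would normally need to be fetched first. A minimal sketch of one way to do that with huggingface_hub.hf_hub_download, assuming the .pth file lives in the DonImages/Testing2 Space (the repo id is taken from the URL above; the variable names here are illustrative, not part of the commit):

    from huggingface_hub import hf_hub_download
    import torch

    # Download lora_model.pth from the Space repo into the local HF cache,
    # then load the resulting local file with torch.load as before.
    local_lora_path = hf_hub_download(
        repo_id="DonImages/Testing2",
        filename="lora_model.pth",
        repo_type="space",   # the file is hosted in a Space, not a model repo
        token=token,         # the HF_TOKEN already read from the environment
    )
    lora_weights = torch.load(local_lora_path, map_location="cuda")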