kayfahaarukku committed
Commit 56c8d0a · verified · 1 Parent(s): 5e4a601

Update app.py

Files changed (1):
  1. app.py +43 -24
app.py CHANGED
@@ -2,31 +2,49 @@ import os
 import spaces
 import torch
 from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+from safetensors.torch import load_file
 import gradio as gr
 import random
 import tqdm
+from huggingface_hub import hf_hub_download
 
 # Enable TQDM progress tracking
 tqdm.monitor_interval = 0
 
-# Load the diffusion pipeline
-pipe = StableDiffusionXLPipeline.from_pretrained(
-    "kayfahaarukku/irAsu-1.0",
-    torch_dtype=torch.float16,
-    custom_pipeline="lpw_stable_diffusion_xl",
-)
-pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+# Load the model from safetensors file
+def load_model():
+    model_path = hf_hub_download(
+        repo_id="kayfahaarukku/AkashicPulse-v1.0",
+        filename="AkashicPulse-v1.0-ft-ft.safetensors"
+    )
+
+    # Initialize pipeline with base SDXL configuration
+    pipe = StableDiffusionXLPipeline.from_pretrained(
+        "stabilityai/stable-diffusion-xl-base-1.0",
+        torch_dtype=torch.float16,
+        variant="fp16",
+        use_safetensors=True
+    )
+
+    # Load the custom model weights
+    state_dict = load_file(model_path)
+    pipe.unet.load_state_dict(state_dict)
+    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
+    return pipe
+
+# Load the pipeline
+pipe = load_model()
 
 # Function to generate an image
-@spaces.GPU  # Adjust the duration as needed
+@spaces.GPU
 def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
-    pipe.to('cuda')  # Move the model to GPU when the function is called
+    pipe.to('cuda')
 
     if randomize_seed:
         seed = random.randint(0, 99999999)
     if use_defaults:
-        prompt = f"{prompt}, best quality, amazing quality, very aesthetic"
-        negative_prompt = f"nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], {negative_prompt}"
+        prompt = f"{prompt}, masterpiece, best quality"
+        negative_prompt = f"lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, signature, watermark, username, blurry, {negative_prompt}"
     generator = torch.manual_seed(seed)
 
     def callback(step, timestep, latents):
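Note on the new loading path: the commit downloads a single .safetensors file and loads it into the base pipeline's UNet with load_state_dict, which assumes the file is a UNet-only state dict in diffusers' key layout. If the file is instead a full single-file SDXL checkpoint, diffusers' from_single_file loader is the usual route. A minimal sketch of that alternative, not the committed code; the helper name load_model_single_file is hypothetical, while the repo id and filename are taken from the diff:

# Sketch only (not the committed code): load the checkpoint in one step,
# assuming AkashicPulse-v1.0-ft-ft.safetensors is a complete single-file
# SDXL checkpoint rather than a UNet-only state dict.
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
from huggingface_hub import hf_hub_download

def load_model_single_file():
    # Hypothetical helper name; repo and filename come from the diff above.
    model_path = hf_hub_download(
        repo_id="kayfahaarukku/AkashicPulse-v1.0",
        filename="AkashicPulse-v1.0-ft-ft.safetensors",
    )
    # from_single_file parses the whole checkpoint (UNet, text encoders, VAE)
    # instead of patching only the UNet of the base pipeline.
    pipe = StableDiffusionXLPipeline.from_single_file(model_path, torch_dtype=torch.float16)
    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
    return pipe

Either way, the swap to EulerAncestralDiscreteScheduler matches the "Euler a" sampler recommended further down.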
@@ -58,13 +76,13 @@ def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_sca
     return image, seed, gr.update(value=metadata_text)
 
 def reset_inputs():
-    return gr.update(value=''), gr.update(value=''), gr.update(value=True), gr.update(value='832x1216'), gr.update(value=4), gr.update(value=28), gr.update(value=0), gr.update(value=True), gr.update(value='')
+    return gr.update(value=''), gr.update(value=''), gr.update(value=True), gr.update(value='832x1216'), gr.update(value=7), gr.update(value=28), gr.update(value=0), gr.update(value=True), gr.update(value='')
 
-with gr.Blocks(title="irAsu 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
+with gr.Blocks(title="AkashicPulse v1.0 Demo", theme="NoCrypt/[email protected]") as demo:
     gr.HTML(
-        "<h1>irAsu 1.0 Demo</h1>"
-        "This demo is intended to showcase what the model is capable of and is not intended to be the main generation platform. Results produced with Diffusers are not the best, and it's highly recommended for you to get the model running inside Stable Diffusion WebUI or ComfyUI."
-    )
+        "<h1>AkashicPulse v1.0 Demo</h1>"
+        "This demo showcases the AkashicPulse v1.0 model capabilities. For best results, it's recommended to run the model in Stable Diffusion WebUI or ComfyUI with MaHiRo CFG enabled."
+    )
     with gr.Row():
         with gr.Column():
             prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
@@ -78,8 +96,8 @@ with gr.Blocks(title="irAsu 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
                 label="Resolution",
                 value="832x1216"
             )
-            guidance_scale_input = gr.Slider(minimum=1, maximum=20, step=0.5, label="Guidance Scale", value=4)
-            num_inference_steps_input = gr.Slider(minimum=1, maximum=100, step=1, label="Number of Inference Steps", value=28)
+            guidance_scale_input = gr.Slider(minimum=4, maximum=10, step=0.5, label="Guidance Scale (CFG)", value=7)
+            num_inference_steps_input = gr.Slider(minimum=20, maximum=30, step=1, label="Number of Steps", value=28)
             seed_input = gr.Slider(minimum=0, maximum=999999999, step=1, label="Seed", value=0, interactive=True)
             randomize_seed_input = gr.Checkbox(label="Randomize Seed", value=True)
             generate_button = gr.Button("Generate")
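The Resolution radio above passes plain strings such as "832x1216", while the sliders pass numbers, so generate_image has to split the resolution string into integer width and height before calling the pipeline. The diff does not show that step; a minimal sketch of one way to do it (parse_resolution is a hypothetical helper, not part of app.py):

# Hypothetical helper, not shown in the diff: turn an "832x1216"-style
# radio value into the (width, height) integers the SDXL pipeline expects.
def parse_resolution(resolution: str) -> tuple[int, int]:
    width, height = map(int, resolution.lower().split("x"))
    return width, height

print(parse_resolution("832x1216"))  # -> (832, 1216)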
@@ -97,14 +115,15 @@ with gr.Blocks(title="irAsu 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
     gr.Markdown(
         """
         ### Recommended prompt formatting:
-        `1girl/1boy, character name, from what series, everything else in any order, best quality, amazing quality, very aesthetic,`
+        `1girl/1boy, character name, series, by artist name, the rest of the prompt, masterpiece, best quality`
 
-        **PS:** `best quality, amazing quality, very aesthetic,` is automatically added when "Use Default Quality Tags and Negative Prompt" is enabled
+        **PS:** `masterpiece, best quality` is automatically added when "Use Default Quality Tags and Negative Prompt" is enabled
 
-        ### Recommended settings:
-        - Steps: 25-30
-        - CFG: 3.5-5
-        - Sweet spot: 28 steps, 4 CFG
+        ### Current settings (recommended):
+        - Sampler: Euler a (fixed)
+        - Steps: 20-30 (sweet spot: 28)
+        - CFG: 4-10 (sweet spot: 7)
+        - Optional: Enable MaHiRo CFG in reForge or ComfyUI
         """
     )
 
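For reference, the recommended settings in the Markdown above map directly onto a plain diffusers call. A minimal sketch of reproducing them outside the Gradio UI, reusing the load_model() defined in this commit and assuming a CUDA GPU is available; the prompt and seed are illustrative:

# Sketch: reproduce the recommended settings (Euler a via the scheduler set
# in load_model, 28 steps, CFG 7, 832x1216) without the Gradio UI.
# Assumes load_model() from the app.py above and a CUDA device.
import torch

pipe = load_model().to("cuda")
generator = torch.Generator(device="cuda").manual_seed(12345)  # any fixed seed

image = pipe(
    prompt="1girl, solo, looking at viewer, masterpiece, best quality",  # illustrative prompt
    negative_prompt="lowres, bad anatomy, bad hands, worst quality, low quality",
    width=832,
    height=1216,
    guidance_scale=7,
    num_inference_steps=28,
    generator=generator,
).images[0]
image.save("sample.png")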