kayfahaarukku committed (verified)
Commit 25b110f · 1 Parent(s): 5193ab5

Update app.py

Files changed (1)
  1. app.py +43 -38
app.py CHANGED
@@ -14,7 +14,7 @@ HF_TOKEN = os.getenv("HF_TOKEN")
 
 # Load the diffusion pipeline
 pipe = StableDiffusionXLPipeline.from_single_file(
-    "https://huggingface.co/kayfahaarukku/AkashicPulse-v1.0/resolve/main/AkashicPulse-v1.0-ft-ft.safetensors",
+    "https://huggingface.co/kayfahaarukku/AkashicPulse-v1.0/blob/main/AkashicPulse-v1.0-ft-ft.safetensors",  # Fixed URL
     torch_dtype=torch.float16,
     custom_pipeline="lpw_stable_diffusion_xl",
     use_safetensors=True,
@@ -25,41 +25,46 @@ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.conf
 # Function to generate an image
 @spaces.GPU  # Adjust the duration as needed
 def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
-    pipe.to('cuda')  # Move the model to GPU when the function is called
-
-    if randomize_seed:
-        seed = random.randint(0, 99999999)
-    if use_defaults:
-        prompt = f"{prompt}, best quality, amazing quality, very aesthetic"
-        negative_prompt = f"nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], {negative_prompt}"
-    generator = torch.manual_seed(seed)
-
-    def callback(step, timestep, latents):
-        progress(step / num_inference_steps)
-        return
-
-    width, height = map(int, resolution.split('x'))
-    image = pipe(
-        prompt,
-        negative_prompt=negative_prompt,
-        width=width,
-        height=height,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        generator=generator,
-        callback=callback,
-        callback_steps=1
-    ).images[0]
+    try:
+        pipe.to('cuda')  # Move the model to GPU when the function is called
+
+        if randomize_seed:
+            seed = random.randint(0, 99999999)
+        if use_defaults:
+            prompt = f"{prompt}, best quality, amazing quality, very aesthetic"
+            negative_prompt = f"nsfw, lowres, (bad), text, error, fewer, extra, missing, worst quality, jpeg artifacts, low quality, watermark, unfinished, displeasing, oldest, early, chromatic aberration, signature, extra digits, artistic error, username, scan, [abstract], {negative_prompt}"
+        generator = torch.manual_seed(seed)
+
+        def callback(step, timestep, latents):
+            progress(step / num_inference_steps)
+            return
+
+        width, height = map(int, resolution.split('x'))
+        image = pipe(
+            prompt,
+            negative_prompt=negative_prompt,
+            width=width,
+            height=height,
+            guidance_scale=guidance_scale,
+            num_inference_steps=num_inference_steps,
+            generator=generator,
+            callback=callback,
+            callback_steps=1
+        ).images[0]
 
-    torch.cuda.empty_cache()
+        torch.cuda.empty_cache()
 
-    metadata_text = f"{prompt}\nNegative prompt: {negative_prompt}\nSteps: {num_inference_steps}, Sampler: Euler a, Size: {width}x{height}, Seed: {seed}, CFG scale: {guidance_scale}"
+        metadata_text = f"{prompt}\nNegative prompt: {negative_prompt}\nSteps: {num_inference_steps}, Sampler: Euler a, Size: {width}x{height}, Seed: {seed}, CFG scale: {guidance_scale}"
 
-    return image, seed, metadata_text
+        return image, seed, metadata_text
+    except Exception as e:
+        return None, seed, f"Error during generation: {str(e)}"
 
 # Define Gradio interface
 def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
     image, seed, metadata_text = generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)
+    if image is None:
+        return gr.update(value=None), seed, gr.update(value=metadata_text)
     return image, seed, gr.update(value=metadata_text)
 
 def reset_inputs():
@@ -68,8 +73,10 @@ def reset_inputs():
 with gr.Blocks(title="irAsu 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
     gr.HTML(
         "<h1>irAsu 1.0 Demo</h1>"
-        "This demo is intended to showcase what the model is capable of and is not intended to be the main generation platform. Results produced with Diffusers are not the best, and it's highly recommended for you to get the model running inside Stable Diffusion WebUI or ComfyUI."
-    )
+        "<p>This demo is intended to showcase what the model is capable of and is not intended to be the main generation platform. "
+        "Results produced with Diffusers are not the best, and it's highly recommended for you to get the model running inside "
+        "Stable Diffusion WebUI or ComfyUI.</p>"
+    )
     with gr.Row():
         with gr.Column():
             prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
@@ -93,11 +100,7 @@ with gr.Blocks(title="irAsu 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
         with gr.Column():
             output_image = gr.Image(type="pil", label="Generated Image")
             with gr.Accordion("Parameters", open=False):
-                gr.Markdown(
-                    """
-                    This parameter is compatible with Stable Diffusion WebUI's parameter importer.
-                    """
-                )
+                gr.Markdown("This parameter is compatible with Stable Diffusion WebUI's parameter importer.")
                 metadata_textbox = gr.Textbox(lines=6, label="Image Parameters", interactive=False, max_lines=6)
             gr.Markdown(
                 """
@@ -116,7 +119,8 @@ with gr.Blocks(title="irAsu 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
     generate_button.click(
         interface_fn,
         inputs=[
-            prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
+            prompt_input, negative_prompt_input, use_defaults_input, resolution_input,
+            guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
         ],
         outputs=[output_image, seed_input, metadata_textbox]
     )
@@ -125,7 +129,8 @@ with gr.Blocks(title="irAsu 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
         reset_inputs,
         inputs=[],
         outputs=[
-            prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input, metadata_textbox
+            prompt_input, negative_prompt_input, use_defaults_input, resolution_input,
+            guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input, metadata_textbox
         ]
     )
 