RanM committed on
Commit b85438c · verified · 1 Parent(s): e15ee10

Update app.py

Files changed (1)
  1. app.py +8 -5
app.py CHANGED
@@ -1,20 +1,23 @@
 import gradio as gr
-from diffusers import AutoPipelineForText2Image
+from diffusers import AutoPipelineForText2Image, EulerAncestralDiscreteScheduler
 from generate_propmts import generate_prompt
 from PIL import Image
 import asyncio
 import traceback
 
-# Load the model once outside of the function
+# Load the model with a modified scheduler
 model = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
+model.scheduler = EulerAncestralDiscreteScheduler.from_config(model.scheduler.config)
+model.scheduler.config.prediction_type = "epsilon"  # Adjust prediction_type
+
 
 async def generate_image(prompt):
     try:
-        num_inference_steps = 4  # Increased inference steps
+        num_inference_steps = 25  # Or your desired value
         output = await asyncio.to_thread(
             model,
             prompt=prompt,
-            num_inference_steps=num_inference_steps,
+            num_inference_steps=num_inference_steps,
             guidance_scale=0.0,
             output_type="pil"
         )
@@ -26,7 +29,7 @@ async def generate_image(prompt):
         print(f"Error generating image: {e}")
         traceback.print_exc()
         return None
-
+
 
 async def inference(sentence_mapping, character_dict, selected_style):
     images = []
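
For reference, the change can be tried outside the Gradio app with a short standalone script. The sketch below mirrors the commit's scheduler swap and the 25-step, guidance-free call; the prompt string and output filename are made up, and prediction_type is passed to from_config here (rather than assigned to model.scheduler.config as in the commit) because diffusers scheduler configs are frozen dictionaries that reject attribute assignment.

# Standalone sketch (not part of app.py): load sdxl-turbo, swap in the
# Euler Ancestral scheduler with epsilon prediction, and run one 25-step,
# guidance-free generation. Prompt and output path are illustrative.
from diffusers import AutoPipelineForText2Image, EulerAncestralDiscreteScheduler

pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(
    pipe.scheduler.config, prediction_type="epsilon"
)

image = pipe(
    prompt="a watercolor fox in a snowy forest",
    num_inference_steps=25,
    guidance_scale=0.0,
    output_type="pil",
).images[0]
image.save("sample.png")

On a GPU machine the pipeline would normally also be moved to the device with pipe.to("cuda"); the sketch omits that to stay minimal.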