RanM committed on
Commit
bf161ed
·
verified ·
1 Parent(s): 7d8005c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -79
app.py CHANGED
@@ -1,108 +1,37 @@
1
- # import gradio as gr
2
- # import torch
3
- # from diffusers import DiffusionPipeline, AutoPipelineForText2Image
4
- # import base64
5
- # from io import BytesIO
6
-
7
-
8
-
9
- # def load_amused_model():
10
- # # pipeline = DiffusionPipeline.from_pretrained("Bakanayatsu/ponyDiffusion-V6-XL-Turbo-DPO")
11
- # # AutoPipelineForText2Image.from_pretrained("stabilityai/sd-turbo")
12
- # # AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
13
- # return DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4",
14
- # safety_checker = None,
15
- # requires_safety_checker = False)
16
-
17
-
18
- # # Generate image from prompt using AmusedPipeline
19
- # def generate_image(prompt):
20
- # try:
21
- # pipe = load_amused_model()
22
- # generator = torch.Generator().manual_seed(8) # Create a generator for reproducibility
23
- # image = pipe(prompt, generator=generator).images[0] # Generate image from prompt
24
- # # image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
25
- # return image, None
26
- # except Exception as e:
27
- # return None, str(e)
28
-
29
- # def inference(prompt):
30
- # print(f"Received prompt: {prompt}") # Debugging statement
31
- # image, error = generate_image(prompt)
32
- # if error:
33
- # print(f"Error generating image: {error}") # Debugging statement
34
- # return "Error: " + error
35
-
36
- # buffered = BytesIO()
37
- # image.save(buffered, format="PNG")
38
- # img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
39
- # return img_str
40
-
41
- # gradio_interface = gr.Interface(
42
- # fn=inference,
43
- # inputs="text",
44
- # outputs="text" # Change output to text to return base64 string
45
- # )
46
-
47
- # if __name__ == "__main__":
48
- # gradio_interface.launch()
49
-
50
-
51
-
52
  import gradio as gr
53
- from diffusers import DiffusionPipeline, DPMSolverSinglestepScheduler
54
  import torch
 
55
  import base64
56
  from io import BytesIO
57
 
58
 
59
- def load_amused_model():
60
- # pipeline = DiffusionPipeline.from_pretrained("Bakanayatsu/ponyDiffusion-V6-XL-Turbo-DPO")
61
  # AutoPipelineForText2Image.from_pretrained("stabilityai/sd-turbo")
62
  # AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
63
- return DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32).to("cpu")
64
-
65
 
66
  # Generate image from prompt using AmusedPipeline
67
  def generate_image(prompt):
68
  try:
69
- pipe = load_amused_model()
70
- pipe.load_lora_weights(
71
- "mann-e/Mann-E_Turbo",
72
- weight_name="manne_turbo.safetensors",
73
- )
74
- # This is equivalent to DPM++ SDE Karras, as noted in https://huggingface.co/docs/diffusers/main/en/api/schedulers/overview
75
- pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config, use_karras_sigmas=True)
76
-
77
- #generator = torch.Generator().manual_seed(8) # Create a generator for reproducibility
78
- #image = pipe(prompt, generator=generator).images[0] # Generate image from prompt
79
- # image = pipe(prompt=prompt, num_inference_steps=1, guidance_scale=0.0).images[0]
80
- image = pipe(
81
- prompt="a cat in a bustling middle eastern city",
82
- num_inference_steps=8,
83
- guidance_scale=4,
84
- width=768,
85
- height=768,
86
- clip_skip=1
87
- ).images[0]
88
  return image, None
89
  except Exception as e:
90
  return None, str(e)
91
 
92
-
93
  def inference(prompt):
94
  print(f"Received prompt: {prompt}") # Debugging statement
95
  image, error = generate_image(prompt)
96
  if error:
97
  print(f"Error generating image: {error}") # Debugging statement
98
  return "Error: " + error
99
-
100
  buffered = BytesIO()
101
  image.save(buffered, format="PNG")
102
  img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
103
  return img_str
104
 
105
-
106
  gradio_interface = gr.Interface(
107
  fn=inference,
108
  inputs="text",
@@ -110,4 +39,4 @@ gradio_interface = gr.Interface(
110
  )
111
 
112
  if __name__ == "__main__":
113
- gradio_interface.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
 
2
  import torch
3
+ from diffusers import DiffusionPipeline, AutoPipelineForText2Image
4
  import base64
5
  from io import BytesIO
6
 
7
 
8
+
9
def text_to_image_model():
    """Load and return the sdxl-turbo text-to-image pipeline.

    The pipeline is loaded once and memoized on the function object, so
    repeated calls (one per generation request) do not re-download or
    re-instantiate the multi-gigabyte model.

    Returns:
        An ``AutoPipelineForText2Image`` instance for ``stabilityai/sdxl-turbo``.
    """
    pipe = getattr(text_to_image_model, "_pipe", None)
    if pipe is None:
        # First call: instantiate the pipeline and cache it for reuse.
        pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo")
        text_to_image_model._pipe = pipe
    return pipe
 
13
 
14
# Turn a text prompt into a PIL image; never raises — errors come back as text.
def generate_image(prompt):
    """Generate an image for *prompt* with the sdxl-turbo pipeline.

    Returns:
        A ``(image, error)`` pair: on success ``error`` is ``None``; on any
        failure ``image`` is ``None`` and ``error`` holds the message string.
    """
    try:
        pipeline = text_to_image_model()
        # sdxl-turbo is a distilled model: a single inference step with
        # guidance disabled (scale 0.0) is the intended usage.
        result = pipeline(prompt=prompt, num_inference_steps=1, guidance_scale=0.0)
        return result.images[0], None
    except Exception as exc:
        return None, str(exc)
22
 
 
23
def inference(prompt):
    """Gradio handler: return the generated image as a base64 PNG string.

    On failure the return value is the text ``"Error: <message>"`` so the
    caller always receives a plain string either way.
    """
    print(f"Received prompt: {prompt}")  # Debugging statement
    image, error = generate_image(prompt)
    if error:
        print(f"Error generating image: {error}")  # Debugging statement
        return "Error: " + error

    buffer = BytesIO()
    image.save(buffer, format="PNG")
    return base64.b64encode(buffer.getvalue()).decode("utf-8")
34
 
 
35
  gradio_interface = gr.Interface(
36
  fn=inference,
37
  inputs="text",
 
39
  )
40
 
41
# Launch the Gradio app only when this file is executed directly,
# not when it is imported as a module.
if __name__ == "__main__":
    gradio_interface.launch()