Andre Embury committed
Commit ea33a01 · unverified · 1 Parent(s): 02022fe

Update main app


Initial commit to convert the app from prompt input to image input.

Files changed (2)
  1. app.py +117 -53
  2. requirements.txt +8 -1
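At a glance, the change swaps the text-prompt path for an image-URL path: app.py now downloads the input image, extracts Canny edges with controlnet_aux, and runs both through a Stable Diffusion ControlNet img2img pipeline. Below is a minimal standalone sketch of that flow using the same model IDs as the diff; the URL is a placeholder, and the ControlNet here is loaded in the pipeline dtype, whereas the commit loads it in float32.

```python
import requests
import torch
from PIL import Image
from controlnet_aux import CannyDetector
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# Same models as the commit (note: the commit loads the ControlNet in float32).
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=dtype
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=dtype
).to(device)
canny = CannyDetector()

# Download the source image (placeholder URL) and build the Canny control image.
raw = requests.get("https://example.com/logo.png", stream=True).raw
img = Image.open(raw).convert("RGB").resize((512, 512))
control = canny(img).resize((512, 512))

out = pipe(
    prompt="redraw the logo from scratch, clean sharp vector-style, ",
    negative_prompt="blurry, distorted, messy, gradients, background noise",
    image=img,              # init image for img2img
    control_image=control,  # Canny edges steering the ControlNet
    guidance_scale=8.5,
    num_inference_steps=25,
    width=512,
    height=512,
).images[0]
out.save("result.png")
```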
app.py CHANGED
```diff
@@ -1,21 +1,41 @@
 import gradio as gr
 import numpy as np
-import random
+
+# import random
 
 # import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
+from diffusers import (
+    StableDiffusionControlNetImg2ImgPipeline,
+    ControlNetModel,
+)
 import torch
 
+import requests
+from fastapi import FastAPI, HTTPException
+from PIL import Image
+from controlnet_aux import CannyDetector
+
 device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
+# model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
+model_repo_id = "runwayml/stable-diffusion-v1-5"
 
 if torch.cuda.is_available():
     torch_dtype = torch.float16
 else:
     torch_dtype = torch.float32
 
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+controlnet = ControlNetModel.from_pretrained(
+    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float32
+)
+
+# pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
+pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+    model_repo_id,
+    controlnet=controlnet,
+    torch_dtype=torch_dtype,
+).to(device)
 pipe = pipe.to(device)
+canny = CannyDetector()
 
 MAX_SEED = np.iinfo(np.int32).max
 MAX_IMAGE_SIZE = 1024
@@ -23,32 +43,67 @@ MAX_IMAGE_SIZE = 1024
 
 # @spaces.GPU #[uncomment to use ZeroGPU]
 def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
+    image_url,
+    # negative_prompt,
+    # seed,
+    # randomize_seed,
     width,
    height,
     guidance_scale,
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
 ):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
+    # if randomize_seed:
+    #     seed = random.randint(0, MAX_SEED)
+
+    # generator = torch.Generator().manual_seed(seed)
+
+    # image = pipe(
+    #     prompt=prompt,
+    #     negative_prompt=negative_prompt,
+    #     guidance_scale=guidance_scale,
+    #     num_inference_steps=num_inference_steps,
+    #     width=width,
+    #     height=height,
+    #     generator=generator,
+    # ).images[0]
+
+    # return image, seed
+
+    width = int(width)
+    height = int(height)
+
+    try:
+        resp = requests.get(image_url)
+        resp.raise_for_status()
+    except Exception as e:
+        raise HTTPException(400, f"Could not download image: {e}")
+
+    # img = Image.open(io.BytesIO(resp.content)).convert("RGB")
+    img = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")
+    # img = img.resize((req.width, req.height))
+    img = img.resize((width, height))
+
+    control_net_image = canny(img).resize((width, height))
+
+    prompt = (
+        "redraw the logo from scratch, clean sharp vector-style, "
+        # + STYLE_PROMPTS[req.style_preset]
+    )
 
-    image = pipe(
+    output = pipe(
         prompt=prompt,
-        negative_prompt=negative_prompt,
+        negative_prompt=NEGATIVE,
+        image=img,
+        control_image=control_net_image,
+        # strength=req.strength,
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
-        width=width,
         height=height,
-        generator=generator,
+        width=width,
     ).images[0]
 
-    return image, seed
+    return output
 
 
 examples = [
@@ -64,16 +119,20 @@ css = """
 }
 """
 
+NEGATIVE = "blurry, distorted, messy, gradients, background noise"
+WIDTH = 512
+HEIGHT = 512
+
 with gr.Blocks(css=css) as demo:
     with gr.Column(elem_id="col-container"):
         gr.Markdown(" # Text-to-Image Gradio Template")
 
         with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
+            image_url = gr.Text(
+                label="Image URL",
                 show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
+                # max_lines=1,
+                placeholder="Provide a image URL",
                 container=False,
             )
 
@@ -82,38 +141,40 @@ with gr.Blocks(css=css) as demo:
         result = gr.Image(label="Result", show_label=False)
 
         with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
+            negative_prompt = gr.Label(
+                label="Negative prompts",
+                # max_lines=1,
+                value=NEGATIVE,
+                visible=True,
             )
 
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
-            )
+            # seed = gr.Slider(
+            #     label="Seed",
+            #     minimum=0,
+            #     maximum=MAX_SEED,
+            #     step=1,
+            #     value=0,
+            # )
 
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+            # randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
 
             with gr.Row():
-                width = gr.Slider(
+                width = gr.Label(
                     label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=WIDTH,
+                    # minimum=256,
+                    # maximum=MAX_IMAGE_SIZE,
+                    # step=32,
+                    # value=1024,  # Replace with defaults that work for your model
                 )
 
-                height = gr.Slider(
+                height = gr.Label(
                     label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
+                    value=HEIGHT,
+                    # minimum=256,
+                    # maximum=MAX_IMAGE_SIZE,
+                    # step=32,
+                    # value=1024,  # Replace with defaults that work for your model
                 )
 
             with gr.Row():
@@ -122,7 +183,7 @@ with gr.Blocks(css=css) as demo:
                     minimum=0.0,
                     maximum=10.0,
                     step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
+                    value=8.5,  # Replace with defaults that work for your model
                 )
 
                 num_inference_steps = gr.Slider(
@@ -130,24 +191,27 @@ with gr.Blocks(css=css) as demo:
                     minimum=1,
                     maximum=50,
                     step=1,
-                    value=2,  # Replace with defaults that work for your model
+                    value=25,  # Replace with defaults that work for your model
                 )
 
-        gr.Examples(examples=examples, inputs=[prompt])
+        # gr.Examples(examples=examples, inputs=[prompt])
         gr.on(
-            triggers=[run_button.click, prompt.submit],
+            triggers=[run_button.click, image_url.submit],
             fn=infer,
            inputs=[
-                prompt,
-                negative_prompt,
-                seed,
-                randomize_seed,
+                image_url,
+                # negative_prompt,
+                # seed,
+                # randomize_seed,
                 width,
                 height,
                 guidance_scale,
                 num_inference_steps,
             ],
-            outputs=[result, seed],
+            outputs=[
+                result,
+                # seed,
+            ],
         )
 
 if __name__ == "__main__":
```
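The rebuilt `infer` takes an image URL plus numeric settings and returns a single PIL image, so it can also be exercised outside the Gradio UI. A hedged example of a direct call (the URL is a placeholder; importing app.py loads the pipelines at module level):

```python
# Hypothetical direct call to the updated entry point; the URL is a placeholder.
from app import infer  # importing app.py builds the ControlNet img2img pipeline

result = infer(
    "https://example.com/logo.png",  # image_url
    512,   # width
    512,   # height
    8.5,   # guidance_scale
    25,    # num_inference_steps
)
result.save("regenerated_logo.png")
```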
requirements.txt CHANGED
```diff
@@ -3,4 +3,11 @@ diffusers
 invisible_watermark
 torch
 transformers
-xformers
+# xformers
+fastapi
+uvicorn
+pydantic
+requests
+Pillow
+controlnet-aux
+gradio
```