wanghuging committed on
Commit a13309f · 1 Parent(s): 6e04a6e

Update app.py

Files changed (1): app.py (+30, -24)
app.py CHANGED
@@ -41,12 +41,12 @@ t2i_pipe = StableDiffusionPipeline.from_single_file(
     requires_safety_checker = False
 )
 
-if SAFETY_CHECKER == "True":
-    i2i_pipe = AutoPipelineForImage2Image.from_pretrained(
-        "stabilityai/sdxl-turbo",
-        torch_dtype=torch_dtype,
-        variant="fp16" if torch_dtype == torch.float16 else "fp32",
-    )
+# if SAFETY_CHECKER == "True":
+# i2i_pipe = AutoPipelineForImage2Image.from_pretrained(
+# "stabilityai/sdxl-turbo",
+# torch_dtype=torch_dtype,
+# variant="fp16" if torch_dtype == torch.float16 else "fp32",
+# )
 # t2i_pipe = AutoPipelineForText2Image.from_pretrained(
 #     #"stabilityai/sdxl-turbo",
 #     # "wanghuging/demo_model",
@@ -55,13 +55,13 @@ if SAFETY_CHECKER == "True":
 # torch_dtype=torch_dtype,
 # variant="fp16" #if torch_dtype == torch.float16 else "fp32",
 # )
-else:
-    i2i_pipe = AutoPipelineForImage2Image.from_pretrained(
-        "stabilityai/sdxl-turbo",
-        safety_checker=None,
-        torch_dtype=torch_dtype,
-        variant="fp16" if torch_dtype == torch.float16 else "fp32",
-    )
+# else:
+# i2i_pipe = AutoPipelineForImage2Image.from_pretrained(
+# "stabilityai/sdxl-turbo",
+# safety_checker=None,
+# torch_dtype=torch_dtype,
+# variant="fp16" if torch_dtype == torch.float16 else "fp32",
+# )
 # t2i_pipe = AutoPipelineForText2Image.from_pretrained(
 #     #"stabilityai/sdxl-turbo",
 #     # "wanghuging/demo_model",
@@ -76,8 +76,8 @@ else:
 t2i_pipe.safety_checker = lambda images, clip_input: (images, False)
 t2i_pipe.to(device=torch_device, dtype=torch_dtype).to(device)
 t2i_pipe.set_progress_bar_config(disable=True)
-i2i_pipe.to(device=torch_device, dtype=torch_dtype).to(device)
-i2i_pipe.set_progress_bar_config(disable=True)
+# i2i_pipe.to(device=torch_device, dtype=torch_dtype).to(device)
+# i2i_pipe.set_progress_bar_config(disable=True)
 
 
 
@@ -89,6 +89,7 @@ def resize_crop(image, size=512):
 
 
 async def predict(init_image, prompt, strength, steps, seed=1231231):
+    init_image = None
     if init_image is not None:
         init_image = resize_crop(init_image)
     generator = torch.manual_seed(seed)
@@ -146,7 +147,7 @@ css = """
 }
 """
 with gr.Blocks(css=css) as demo:
-    init_image_state = gr.State()
+    # init_image_state = gr.State()
     with gr.Column(elem_id="container"):
         gr.Markdown(
             """# Derm-T2IM Text to Image Skin Cancer
@@ -161,14 +162,19 @@ with gr.Blocks(css=css) as demo:
                 scale=5,
                 container=False,
             )
+            neg_prompt = gr.Textbox(
+                placeholder="Insert your negative prompt here:",
+                scale=5,
+                container=False,
+            )
             generate_bt = gr.Button("Generate", scale=1)
         with gr.Row():
-            with gr.Column():
-                image_input = gr.Image(
-                    sources=["upload", "webcam", "clipboard"],
-                    label="Webcam",
-                    type="pil",
-                )
+            # with gr.Column():
+            # image_input = gr.Image(
+            # sources=["upload", "webcam", "clipboard"],
+            # label="Webcam",
+            # type="pil",
+            # )
             with gr.Column():
                 image = gr.Image(type="filepath")
         with gr.Accordion("Advanced options", open=False):
@@ -180,7 +186,7 @@ with gr.Blocks(css=css) as demo:
                 step=0.001,
             )
             steps = gr.Slider(
-                label="Steps", value=2, minimum=1, maximum=10, step=1
+                label="Steps", value=2, minimum=1, maximum=25, step=1
            )
             seed = gr.Slider(
                 randomize=True,
@@ -211,7 +217,7 @@ with gr.Blocks(css=css) as demo:
     # ```
     # """
     # )
-
+    image_input = None
     inputs = [image_input, prompt, strength, steps, seed]
     generate_bt.click(fn=predict, inputs=inputs, outputs=image, show_progress=False)
     prompt.input(fn=predict, inputs=inputs, outputs=image, show_progress=False)
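
Note: the new neg_prompt textbox is created in the UI, but this commit does not add it to the inputs list or use it inside predict, so it has no effect yet. Below is a minimal, hypothetical sketch of how it could be wired through, assuming the standard diffusers pipeline call signature (negative_prompt, num_inference_steps, and generator are diffusers keywords; the rest is an illustrative rearrangement of the existing app code, not part of this commit):

```python
# Hypothetical sketch only -- not part of this commit.
# Assumes app.py already defines t2i_pipe (the StableDiffusionPipeline loaded above).
import torch

async def predict(init_image, prompt, neg_prompt, strength, steps, seed=1231231):
    # This build is text-to-image only, so init_image and strength are ignored.
    generator = torch.manual_seed(seed)
    results = t2i_pipe(
        prompt=prompt,
        negative_prompt=neg_prompt,   # standard diffusers keyword
        num_inference_steps=steps,
        generator=generator,
    )
    return results.images[0]

# The Gradio wiring would then also need the new textbox in the inputs list:
# inputs = [image_input, prompt, neg_prompt, strength, steps, seed]
```

Gradio's image output component generally accepts a PIL image as well as a file path, but the existing predict may return a saved file path instead, so the return value above would need to match whatever the rest of app.py expects.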