Profakerr committed
Commit a2dbbb3 · verified · 1 Parent(s): bd92452

Update app.py

Files changed (1)
  app.py  +3 -42
app.py CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 import spaces
-from RealESRGAN import RealESRGAN
 import torch
 from diffusers import AutoencoderKL, TCDScheduler, DPMSolverMultistepScheduler
 from diffusers.models.model_loading_utils import load_state_dict
@@ -47,43 +46,8 @@ pipe = StableDiffusionXLFillPipeline.from_pretrained(
 pipe.scheduler = TCDScheduler.from_config(pipe.scheduler.config,algorithm_type="dpmsolver++",use_karras_sigmas=True)
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-model2 = RealESRGAN(device, scale=2)
-model2.load_weights('weights/RealESRGAN_x2.pth', download=True)
-model4 = RealESRGAN(device, scale=4)
-model4.load_weights('weights/RealESRGAN_x4.pth', download=True)
 
 
-@spaces.GPU
-def inference(image, size):
-    global model2
-    global model4
-    global model8
-    if image is None:
-        raise gr.Error("Image not uploaded")
-
-
-    if torch.cuda.is_available():
-        torch.cuda.empty_cache()
-
-    if size == '2x':
-        try:
-            result = model2.predict(image.convert('RGB'))
-        except torch.cuda.OutOfMemoryError as e:
-            print(e)
-            model2 = RealESRGAN(device, scale=2)
-            model2.load_weights('weights/RealESRGAN_x2.pth', download=False)
-            result = model2.predict(image.convert('RGB'))
-    elif size == '4x':
-        try:
-            result = model4.predict(image.convert('RGB'))
-        except torch.cuda.OutOfMemoryError as e:
-            print(e)
-            model4 = RealESRGAN(device, scale=4)
-            model4.load_weights('weights/RealESRGAN_x4.pth', download=False)
-            result = model2.predict(image.convert('RGB'))
-
-    print(f"Image size ({device}): {size} ... OK")
-    return result
 
 def add_watermark(image, text="ProFaker", font_path="BRLNSDB.TTF", font_size=25):
     # Load the Berlin Sans Demi font with the specified size
@@ -101,7 +65,7 @@ def add_watermark(image, text="ProFaker", font_path="BRLNSDB.TTF", font_size=25)
     return image
 
 @spaces.GPU
-def fill_image(prompt, negative_prompt, image, model_selection, paste_back, guidance_scale, num_steps, size):
+def fill_image(prompt, negative_prompt, image, model_selection, paste_back, guidance_scale, num_steps):
     (
         prompt_embeds,
         negative_prompt_embeds,
@@ -138,8 +102,6 @@ def fill_image(prompt, negative_prompt, image, model_selection, paste_back, guid
         cnet_image = image
 
     cnet_image = add_watermark(cnet_image)
-    if size !="0":
-        cnet_image = inference(cnet_image,size)
     yield source, cnet_image
 
 
@@ -179,7 +141,6 @@ with gr.Blocks() as demo:
            step=1,
            label="Steps"
        )
-        size = gr.Radio(["0", "2x", "4x"], type="value", value="0", label="Image Quality")
 
        input_image = gr.ImageMask(
            type="pil", label="Input Image", crop_size=(1024,1024), layers=False
@@ -223,7 +184,7 @@ with gr.Blocks() as demo:
        outputs=use_as_input_button,
    ).then(
        fn=fill_image,
-        inputs=[prompt, negative_prompt, input_image, model_selection, paste_back, guidance_scale, num_steps, size],
+        inputs=[prompt, negative_prompt, input_image, model_selection, paste_back, guidance_scale, num_steps],
        outputs=result,
    ).then(
        fn=lambda: gr.update(visible=True),
@@ -241,7 +202,7 @@ with gr.Blocks() as demo:
        outputs=use_as_input_button,
    ).then(
        fn=fill_image,
-        inputs=[prompt, negative_prompt, input_image, model_selection, paste_back, guidance_scale, num_steps, size],
+        inputs=[prompt, negative_prompt, input_image, model_selection, paste_back, guidance_scale, num_steps],
        outputs=result,
    ).then(
        fn=lambda: gr.update(visible=True),
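
The upscaling path removed above can still be run on its own, outside the Gradio app. A minimal standalone sketch, assuming the same RealESRGAN package and weight files the old code used (weights/RealESRGAN_x2.pth, or weights/RealESRGAN_x4.pth with scale=4); the input and output file names are hypothetical:

# Minimal sketch of the upscaling step removed from app.py in this commit.
# Assumes the RealESRGAN package the old code imported; paths are illustrative.
import torch
from PIL import Image
from RealESRGAN import RealESRGAN

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# scale=2 with the x2 weights mirrors the old model2; use scale=4 and
# weights/RealESRGAN_x4.pth to mirror the old model4.
model = RealESRGAN(device, scale=2)
model.load_weights('weights/RealESRGAN_x2.pth', download=True)

image = Image.open('input.png').convert('RGB')   # hypothetical input file
result = model.predict(image)                    # returns the upscaled PIL image
result.save('input_2x.png')                      # hypothetical output file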
 