HelloSun committed on
Commit
dea1924
·
verified ·
1 Parent(s): 7193aee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -5
app.py CHANGED
@@ -9,6 +9,10 @@ import openvino.runtime as ov
9
  from typing import Optional, Dict
10
 
11
  model_id = "Disty0/LCM_SoteMix"
 
 
 
 
12
  batch_size = -1
13
  class CustomOVModelVaeDecoder(OVModelVaeDecoder):
14
  def __init__(
@@ -22,19 +26,19 @@ pipe = OVStableDiffusionPipeline.from_pretrained(model_id, compile = False, ov_c
22
  taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
23
  pipe.vae_decoder = CustomOVModelVaeDecoder(model = OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"), parent_model = pipe, model_dir = taesd_dir)
24
 
25
- pipe.reshape( batch_size=-1, height=512, width=512, num_images_per_prompt=1)
26
  pipe.compile()
27
 
28
  prompt=""
29
  negative_prompt=""
30
 
31
- def infer(prompt,negative_prompt):
32
 
33
  image = pipe(
34
  prompt = prompt,
35
  negative_prompt = negative_prompt,
36
- width = 512,
37
- height = 512,
38
  guidance_scale=1.0,
39
  num_inference_steps=8,
40
  num_images_per_prompt=1,
@@ -63,7 +67,7 @@ with gr.Blocks(css=css) as demo:
63
 
64
  with gr.Column(elem_id="col-container"):
65
  gr.Markdown(f"""
66
- # Disty0/LCM_SoteMix 512x512
67
  Currently running on {power_device}.
68
  """)
69
 
 
9
  from typing import Optional, Dict
10
 
11
  model_id = "Disty0/LCM_SoteMix"
12
+ HIGH=1024
13
+ WIDTH=1024
14
+
15
+
16
  batch_size = -1
17
  class CustomOVModelVaeDecoder(OVModelVaeDecoder):
18
  def __init__(
 
26
  taesd_dir = snapshot_download(repo_id="deinferno/taesd-openvino")
27
  pipe.vae_decoder = CustomOVModelVaeDecoder(model = OVBaseModel.load_model(f"{taesd_dir}/vae_decoder/openvino_model.xml"), parent_model = pipe, model_dir = taesd_dir)
28
 
29
+ pipe.reshape( batch_size=-1, height=HIGH, width=WIDTH, num_images_per_prompt=1)
30
  pipe.compile()
31
 
32
  prompt=""
33
  negative_prompt=""
34
 
35
+ def infer(prompt,negative_prompt,step):
36
 
37
  image = pipe(
38
  prompt = prompt,
39
  negative_prompt = negative_prompt,
40
+ width = WIDTH,
41
+ height = WIDTH,
42
  guidance_scale=1.0,
43
  num_inference_steps=8,
44
  num_images_per_prompt=1,
 
67
 
68
  with gr.Column(elem_id="col-container"):
69
  gr.Markdown(f"""
70
+ # Disty0/LCM_SoteMix 1024x1024
71
  Currently running on {power_device}.
72
  """)
73