Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -90,7 +90,7 @@ def load_and_prepare_model():
|
|
90 |
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
91 |
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
|
92 |
#sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
|
93 |
-
sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1)
|
94 |
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
|
95 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
96 |
'ford442/RealVisXL_V5.0_BF16',
|
@@ -117,11 +117,11 @@ def load_and_prepare_model():
|
|
117 |
|
118 |
# for compile
|
119 |
hidet.option.parallel_build(True)
|
120 |
-
hidet.option.parallel_tune(-1,
|
121 |
torch._dynamo.config.suppress_errors = True
|
122 |
torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)
|
123 |
# more search
|
124 |
-
hidet.torch.dynamo_config.search_space(
|
125 |
#hidet.torch.dynamo_config.dump_graph_ir("./local_graph")
|
126 |
hidet.option.cache_dir("local_cache")
|
127 |
# automatically transform the model to use float16 data type
|
@@ -131,6 +131,7 @@ hidet.torch.dynamo_config.use_fp16_reduction(True)
|
|
131 |
# use tensorcore
|
132 |
hidet.torch.dynamo_config.use_tensor_core()
|
133 |
# Preload and compile both models
|
|
|
134 |
|
135 |
pipe = load_and_prepare_model()
|
136 |
|
|
|
90 |
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #.to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
91 |
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
|
92 |
#sched = DPMSolverSDEScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
|
93 |
+
sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1,use_karras_sigmas=True)
|
94 |
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
|
95 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
96 |
'ford442/RealVisXL_V5.0_BF16',
|
|
|
117 |
|
118 |
# for compile
|
119 |
hidet.option.parallel_build(True)
|
120 |
+
hidet.option.parallel_tune(-1,4.0)
|
121 |
torch._dynamo.config.suppress_errors = True
|
122 |
torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)
|
123 |
# more search
|
124 |
+
hidet.torch.dynamo_config.search_space(1)
|
125 |
#hidet.torch.dynamo_config.dump_graph_ir("./local_graph")
|
126 |
hidet.option.cache_dir("local_cache")
|
127 |
# automatically transform the model to use float16 data type
|
|
|
131 |
# use tensorcore
|
132 |
hidet.torch.dynamo_config.use_tensor_core()
|
133 |
# Preload and compile both models
|
134 |
+
hidet.torch.dynamo_config.steal_weights(False)
|
135 |
|
136 |
pipe = load_and_prepare_model()
|
137 |
|