Spaces: Running on Zero
Update app.py
app.py CHANGED

@@ -191,8 +191,8 @@ def load_and_prepare_model():
 
     return pipe
 
-hidet.option.parallel_build(
-hidet.option.parallel_tune(2,1.5)
+hidet.option.parallel_build(False)
+#hidet.option.parallel_tune(2,1.5)
 #hidet.torch.dynamo_config.steal_weights(False)
 torch._dynamo.config.suppress_errors = True
 torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)
@@ -206,7 +206,7 @@ hidet.option.cache_dir("local_cache")
 # use float16 data type as the accumulate data type in operators with reduction
 #hidet.torch.dynamo_config.use_fp16_reduction(True)
 # use tensorcore
-
+hidet.torch.dynamo_config.use_tensor_core()
 hidet.torch.dynamo_config.steal_weights(False)
 
 # Preload and compile both models
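
Below is a minimal, self-contained sketch (not part of this commit) of how the hidet options in this diff typically fit around torch.compile. The model id and the choice to compile only pipe.unet are illustrative assumptions; the Space's actual load_and_prepare_model() is not shown in the diff.

import torch
import hidet
import diffusers

# Settings mirrored from this commit: cache compiled kernels locally,
# build kernels in a single thread, allow tensor core kernels, and let
# PyTorch keep ownership of the weights instead of hidet "stealing" them.
hidet.option.cache_dir("local_cache")
hidet.option.parallel_build(False)
hidet.torch.dynamo_config.use_tensor_core()
hidet.torch.dynamo_config.steal_weights(False)

# As in the surrounding lines of app.py: keep attention blocks out of the
# captured graph and fall back to eager execution on compilation errors.
torch._dynamo.config.suppress_errors = True
torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)

# Hypothetical pipeline load standing in for load_and_prepare_model().
pipe = diffusers.StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Importing hidet registers the 'hidet' dynamo backend; the first UNet
# call triggers compilation, later calls reuse the optimized graph.
pipe.unet = torch.compile(pipe.unet, backend="hidet")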