ford442 committed on
Commit
0051c38
·
verified ·
1 Parent(s): 7baa9f2

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -191,8 +191,8 @@ def load_and_prepare_model():
191
 
192
  return pipe
193
 
194
- hidet.option.parallel_build(True)
195
- hidet.option.parallel_tune(2,1.5)
196
  #hidet.torch.dynamo_config.steal_weights(False)
197
  torch._dynamo.config.suppress_errors = True
198
  torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)
@@ -206,7 +206,7 @@ hidet.option.cache_dir("local_cache")
206
  # use float16 data type as the accumulate data type in operators with reduction
207
  #hidet.torch.dynamo_config.use_fp16_reduction(True)
208
  # use tensorcore
209
- #hidet.torch.dynamo_config.use_tensor_core()
210
  hidet.torch.dynamo_config.steal_weights(False)
211
 
212
  # Preload and compile both models
 
191
 
192
  return pipe
193
 
194
+ hidet.option.parallel_build(False)
195
+ #hidet.option.parallel_tune(2,1.5)
196
  #hidet.torch.dynamo_config.steal_weights(False)
197
  torch._dynamo.config.suppress_errors = True
198
  torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)
 
206
  # use float16 data type as the accumulate data type in operators with reduction
207
  #hidet.torch.dynamo_config.use_fp16_reduction(True)
208
  # use tensorcore
209
+ hidet.torch.dynamo_config.use_tensor_core()
210
  hidet.torch.dynamo_config.steal_weights(False)
211
 
212
  # Preload and compile both models