KingNish committed on
Commit
f91901a
·
verified ·
1 Parent(s): f76bdda

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -79,7 +79,7 @@ quantization_config = BitsAndBytesConfig(load_in_8bit=True)
79
  model = AutoModelForCausalLM.from_pretrained(
80
  "m-a-p/YuE-s1-7B-anneal-en-cot",
81
  torch_dtype="auto",
82
- attn_implementation="flash_attention_2",
83
  quantization_config=quantization_config,
84
  low_cpu_mem_usage=True,
85
  device_map="auto"
 
79
  model = AutoModelForCausalLM.from_pretrained(
80
  "m-a-p/YuE-s1-7B-anneal-en-cot",
81
  torch_dtype="auto",
82
+ # attn_implementation="flash_attention_2",
83
  quantization_config=quantization_config,
84
  low_cpu_mem_usage=True,
85
  device_map="auto"