RitvikPW committed on
Commit df990b7 · verified · 1 Parent(s): e9e1ff1

Update app.py

Files changed (1)
  1. app.py +3 -3
app.py CHANGED
@@ -1,5 +1,5 @@
-import os
-os.system("pip install flash-attn --no-build-isolation")
+# import os
+# os.system("pip install flash-attn --no-build-isolation")
 
 import gradio as gr
 import transformers
@@ -12,7 +12,7 @@ import threading
 
 model_id = "PhysicsWallahAI/Aryabhata-1.0"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda", attn_implementation="flash_attention_2")
+model = AutoModelForCausalLM.from_pretrained(model_id)#, torch_dtype=torch.bfloat16, device_map="cuda", attn_implementation="flash_attention_2")
 
 def process_questions(example):
     example["question_text"] = example["question"]