Update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ import threading
 
 model_id = "PhysicsWallahAI/Aryabhata-1.0"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="
+model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda", attn_implementation="flash_attention_2")
 
 def process_questions(example):
     example["question_text"] = example["question"]
@@ -52,7 +52,7 @@ def generate_answer_stream(question):
         add_generation_prompt=True
     )
 
-    inputs = tokenizer([text], return_tensors="pt")
+    inputs = tokenizer([text], return_tensors="pt").to("cuda")
 
     streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
     stopping = StoppingCriteriaList([StopStringCriteria(tokenizer, stop_strings)])
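
For context, a minimal runnable sketch of how the two changed lines fit together: the model is loaded onto CUDA with FlashAttention-2, so the tokenized inputs must be moved to the same device before generation, which is what the added `.to("cuda")` does. This is a reconstruction, not the full app.py: the `stop_strings` value, `max_new_tokens`, and the thread-plus-streamer loop are inferred from the hunk context (`import threading`, `TextIteratorStreamer`, `StoppingCriteriaList`), not copied from the file.

import threading

import torch
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    StoppingCriteriaList,
    StopStringCriteria,
    TextIteratorStreamer,
)

model_id = "PhysicsWallahAI/Aryabhata-1.0"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="cuda",
    attn_implementation="flash_attention_2",  # needs the flash-attn package and a supported GPU
)

stop_strings = ["<|im_end|>"]  # hypothetical value; the real list is defined elsewhere in app.py

def generate_answer_stream(question):
    # Build a chat-formatted prompt for the incoming question.
    text = tokenizer.apply_chat_template(
        [{"role": "user", "content": question}],
        tokenize=False,
        add_generation_prompt=True,
    )
    # The commit's key fix: move the tokenized batch to the GPU the model lives on,
    # otherwise generate() fails with a CPU/CUDA device mismatch.
    inputs = tokenizer([text], return_tensors="pt").to("cuda")

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    stopping = StoppingCriteriaList([StopStringCriteria(tokenizer, stop_strings)])

    # Run generate() in a background thread so the streamer can yield
    # decoded tokens as they are produced. max_new_tokens is an assumed budget.
    thread = threading.Thread(
        target=model.generate,
        kwargs=dict(**inputs, max_new_tokens=512, streamer=streamer, stopping_criteria=stopping),
    )
    thread.start()
    for new_text in streamer:
        yield new_text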