pw-ai-research committed on
Commit
10928bd
·
verified ·
1 Parent(s): bb8862d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -1
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import gradio as gr
2
  import transformers
3
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
@@ -9,7 +10,7 @@ import threading
9
 
10
  model_id = "PhysicsWallahAI/Aryabhata-1.0"
11
  tokenizer = AutoTokenizer.from_pretrained(model_id)
12
- model = AutoModelForCausalLM.from_pretrained(model_id)
13
 
14
  def process_questions(example):
15
  example["question_text"] = example["question"]
@@ -37,6 +38,7 @@ def strip_bad_tokens(s, stop_strings):
37
  return s[:-len(suffix)]
38
  return s
39
 
 
40
  def generate_answer_stream(question):
41
  messages = [
42
  {'role': 'system', 'content': 'Think step-by-step; put only the final answer inside \\boxed{}.'},
 
1
+ import spaces
2
  import gradio as gr
3
  import transformers
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
 
10
 
11
  model_id = "PhysicsWallahAI/Aryabhata-1.0"
12
  tokenizer = AutoTokenizer.from_pretrained(model_id)
13
+ model = AutoModelForCausalLM.from_pretrained(model_id).to('cuda')
14
 
15
  def process_questions(example):
16
  example["question_text"] = example["question"]
 
38
  return s[:-len(suffix)]
39
  return s
40
 
41
+ @spaces.GPU
42
  def generate_answer_stream(question):
43
  messages = [
44
  {'role': 'system', 'content': 'Think step-by-step; put only the final answer inside \\boxed{}.'},