pw-ai-research committed on
Commit
c7b0cdd
·
verified ·
1 Parent(s): e07408a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -3
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import spaces
2
  import gradio as gr
3
  import transformers
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
@@ -10,7 +10,7 @@ import threading
10
 
11
  model_id = "PhysicsWallahAI/Aryabhata-1.0"
12
  tokenizer = AutoTokenizer.from_pretrained(model_id)
13
- model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16).to('cuda')
14
 
15
  def process_questions(example):
16
  example["question_text"] = example["question"]
@@ -38,7 +38,6 @@ def strip_bad_tokens(s, stop_strings):
38
  return s[:-len(suffix)]
39
  return s
40
 
41
- @spaces.GPU
42
  def generate_answer_stream(question):
43
  messages = [
44
  {'role': 'system', 'content': 'Think step-by-step; put only the final answer inside \\boxed{}.'},
 
1
+ # import spaces
2
  import gradio as gr
3
  import transformers
4
  from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
 
10
 
11
  model_id = "PhysicsWallahAI/Aryabhata-1.0"
12
  tokenizer = AutoTokenizer.from_pretrained(model_id)
13
+ model = AutoModelForCausalLM.from_pretrained(model_id)
14
 
15
  def process_questions(example):
16
  example["question_text"] = example["question"]
 
38
  return s[:-len(suffix)]
39
  return s
40
 
 
41
  def generate_answer_stream(question):
42
  messages = [
43
  {'role': 'system', 'content': 'Think step-by-step; put only the final answer inside \\boxed{}.'},