rohansampath committed on
Commit 7aad8f7 · verified · 1 Parent(s): 792f192

Update app.py

Files changed (1):
  1. app.py +1 -1
app.py CHANGED
@@ -65,7 +65,7 @@ def generate_answer(question):
     model, tokenizer = load_model()
 
     # Mistral instruction format
-    prompt = f"""<s>[INST] {question}. Provide only the answer. [/INST]"""
+    prompt = f"""<s>[INST] {question}. Provide only the numerical answer. [/INST]"""
 
     inputs = tokenizer(prompt, return_tensors="pt").to('cuda')
     with torch.no_grad():
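For context, a minimal sketch of how a generate_answer function using this updated Mistral [INST] prompt might be completed with standard Hugging Face transformers calls. The model name, the load_model implementation, max_new_tokens, and the decoding step are assumptions for illustration, not taken from the repository.

# Sketch only: the actual app.py's load_model() and generation settings may differ.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model(model_name="mistralai/Mistral-7B-Instruct-v0.2"):
    # Assumed loader; model name and dtype are placeholders.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name, torch_dtype=torch.float16, device_map="cuda"
    )
    return model, tokenizer

def generate_answer(question):
    model, tokenizer = load_model()

    # Mistral instruction format, as in the updated app.py
    prompt = f"""<s>[INST] {question}. Provide only the numerical answer. [/INST]"""

    inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
    with torch.no_grad():
        # Generation settings below are assumptions for illustration.
        output_ids = model.generate(**inputs, max_new_tokens=32, do_sample=False)

    # Decode only the tokens generated after the prompt.
    answer = tokenizer.decode(
        output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True
    )
    return answer.strip()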