w601sxs committed on
Commit
0308ed5
·
1 Parent(s): dfed847

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -6,7 +6,7 @@ from datasets import load_dataset
6
  from trl import SFTTrainer
7
 
8
  ref_model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-70m-deduped-v0", torch_dtype=torch.bfloat16)
9
- peft_model_id = "w601sxs/pythia-70m-instruct-orca-chkpt-64000"
10
 
11
  config = PeftConfig.from_pretrained(peft_model_id)
12
  model = PeftModel.from_pretrained(ref_model, peft_model_id)
@@ -17,8 +17,8 @@ model.eval()
17
  def predict(text):
18
  inputs = tokenizer(text, return_tensors="pt")
19
  with torch.no_grad():
20
- outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
21
- out_text = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
22
 
23
  return out_text.split(text)[-1]
24
 
 
6
  from trl import SFTTrainer
7
 
8
  ref_model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-70m-deduped-v0", torch_dtype=torch.bfloat16)
9
+ peft_model_id = "w601sxs/pythia-70m-instruct-orca-chkpt-1245000"
10
 
11
  config = PeftConfig.from_pretrained(peft_model_id)
12
  model = PeftModel.from_pretrained(ref_model, peft_model_id)
 
17
  def predict(text):
18
  inputs = tokenizer(text, return_tensors="pt")
19
  with torch.no_grad():
20
+ outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=128)
21
+ out_text = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0].split("answer:")[-1]
22
 
23
  return out_text.split(text)[-1]
24