Akshat1000 commited on
Commit
f1b9369
·
verified ·
1 Parent(s): f75296e

Update getans.py

Browse files
Files changed (1) hide show
  1. getans.py +19 -20
getans.py CHANGED
@@ -1,20 +1,19 @@
1
- from mmap import mmap
2
-
3
- import torch
4
- from transformers import AutoTokenizer, AutoModelForCausalLM
5
- from huggingface_hub import login
6
- token1="hf_"
7
- token2="rPlNHzkJScHYmtGSaQPcaoKcjJGYQEpjLu"
8
- login(token=token1+token2)
9
-
10
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
11
- mm="finetuning/output/pytorch_model.bin"
12
- tokenizer = AutoTokenizer.from_pretrained(mm)
13
- model = AutoModelForCausalLM.from_pretrained(mm)
14
-
15
- def get_response(prompt, max_new_tokens=50):
16
- inputs = tokenizer(prompt, return_tensors="pt")
17
- outputs = model.generate(**inputs, max_new_tokens=max_new_tokens, temperature=0.0001, do_sample=True)
18
- response = tokenizer.decode(outputs[0], skip_special_tokens=True) # Use indexing instead of calling
19
- ans=response.toString()
20
- return ans
 
1
from mmap import mmap  # NOTE(review): unused in this file — candidate for removal

import os

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from huggingface_hub import login

# SECURITY FIX: the previous revision committed a live Hugging Face token in
# plain text (split across two string constants to dodge secret scanners).
# Any token that has ever been committed is compromised and must be revoked
# on huggingface.co. Read the credential from the environment instead.
hf_token = os.environ.get("HF_TOKEN")
if hf_token:
    login(token=hf_token, add_to_git_credential=True)

# NOTE(review): `device` is computed but never used below — the model is not
# moved to it and get_response() tokenizes on CPU. Left as-is to avoid a
# cross-cutting behavior change; wire it up in both places together.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# BUG FIX: from_pretrained() expects a model *directory* (containing
# config.json, tokenizer files and the weights) or a hub model id — not the
# path of the pytorch_model.bin file itself, which fails for both calls.
mm = "finetuning/output"
tokenizer = AutoTokenizer.from_pretrained(mm)
model = AutoModelForCausalLM.from_pretrained(mm)
14
def get_response(prompt, max_new_tokens=50):
    """Return the model's generated completion for *prompt* as a plain string.

    Args:
        prompt: Input text fed to the module-level tokenizer.
        max_new_tokens: Upper bound on the number of tokens to generate.

    Returns:
        The decoded output sequence as a ``str`` (note: ``generate`` returns
        the full sequence, so the decoded text includes the prompt).
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    # NOTE(review): temperature ~0 combined with do_sample=True approximates
    # greedy decoding but divides logits by a tiny value; do_sample=False would
    # be the stable equivalent. Kept as-is to preserve existing behavior.
    outputs = model.generate(
        **inputs, max_new_tokens=max_new_tokens, temperature=0.0001, do_sample=True
    )
    # BUG FIX: tokenizer.decode() already returns a Python str. The original
    # then called response.toString(), which raised AttributeError on every
    # call — str has no toString() method (that is a Java/JavaScript idiom).
    return tokenizer.decode(outputs[0], skip_special_tokens=True)