Jose Benitez committed · 94f213a · 1 Parent(s): 61debfb
update app.py

app.py CHANGED
@@ -21,8 +21,10 @@ import torch
 from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
 from threading import Thread
 
-tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1")
-model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1", torch_dtype=torch.float16)
+#tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1")
+#model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1", torch_dtype=torch.float16)
+model = AutoModelForCausalLM.from_pretrained("mattshumer/mistral-8x7b-chat", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("mattshumer/mistral-8x7b-chat")
 model = model.to('cuda:0')
 
 class StopOnTokens(StoppingCriteria):
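A note on the new load lines: the old RedPajama call passed torch_dtype=torch.float16, but the replacement does not, so the much larger checkpoint loads in full float32 before model.to('cuda:0'). A minimal sketch of a half-precision load, assuming the same checkpoint and that the accelerate package is available for device_map (this is not the commit's code):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Sketch only: load in fp16 and let accelerate place the weights across
# available GPUs. With device_map="auto", the later model.to('cuda:0')
# call should be dropped.
model = AutoModelForCausalLM.from_pretrained(
    "mattshumer/mistral-8x7b-chat",
    trust_remote_code=True,
    torch_dtype=torch.float16,  # matches the dtype the old RedPajama load used
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("mattshumer/mistral-8x7b-chat")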
@@ -41,6 +43,10 @@ def predict(message, history):
     messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]]) #curr_system_message +
                 for item in history_transformer_format])
 
+    # x = tok.encode(PROMPT, return_tensors="pt").cuda()
+    # x = model.generate(x, max_new_tokens=512).cpu()
+    # return tok.batch_decode(x)
+
     model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
     streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
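The hunk ends mid-statement at generate_kwargs = dict(, and the commented-out lines added above it reference tok and PROMPT, names that are not defined in this file (they read like a snippet pasted from a model card). For context, the standard TextIteratorStreamer pattern this function builds toward runs model.generate on a background thread and yields partial text as tokens arrive. A sketch of that tail of predict(), with illustrative generation settings rather than this Space's actual values:

from threading import Thread
from transformers import StoppingCriteriaList

# Sketch of the usual streaming tail of predict(); max_new_tokens and the
# sampling flags are assumptions, not this commit's real settings.
generate_kwargs = dict(
    model_inputs,                 # BatchEncoding behaves like a dict, so this merges
    streamer=streamer,
    max_new_tokens=512,
    do_sample=True,
    stopping_criteria=StoppingCriteriaList([StopOnTokens()]),
)
Thread(target=model.generate, kwargs=generate_kwargs).start()

partial_message = ""
for new_token in streamer:        # blocks until the generate thread emits text
    partial_message += new_token
    yield partial_message         # stream the growing reply back to the Gradio UI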