Update app.py
app.py CHANGED
@@ -20,13 +20,13 @@ class StopOnTokens(StoppingCriteria):
                 return True
         return False
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=60)
 def predict(message, history, temperature, max_tokens, top_p, top_k):
     history_transformer_format = history + [[message, ""]]
     stop = StopOnTokens()
     messages = "".join(["".join(["\n<|end|>\n<|user|>\n"+item[0], "\n<|end|>\n<|assistant|>\n"+item[1]]) for item in history_transformer_format])
     model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
-    streamer = TextIteratorStreamer(tokenizer, timeout=
+    streamer = TextIteratorStreamer(tokenizer, timeout=300., skip_prompt=True, skip_special_tokens=True)
     generate_kwargs = dict(
         model_inputs,
         streamer=streamer,
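The hunk ends mid-way through generate_kwargs, so the rest of predict is not visible in this commit view (the old duration and timeout values are also truncated by the diff viewer). For context, a TextIteratorStreamer is normally consumed by running model.generate on a background thread while the request thread iterates the streamer. The sketch below shows that standard transformers pattern under the assumption that this Space follows it; everything past streamer=streamer, (the sampling arguments and the model/tokenizer globals) is inferred from the function signature above, not from the diff.

```python
# Hedged sketch: the standard TextIteratorStreamer consumption pattern.
# `model` and `tokenizer` are assumed to be loaded at module scope;
# everything past `streamer=streamer,` is an assumption, since the
# diff above is truncated there.
from threading import Thread

from transformers import StoppingCriteriaList, TextIteratorStreamer

def predict(message, history, temperature, max_tokens, top_p, top_k):
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()
    messages = "".join(["".join(["\n<|end|>\n<|user|>\n" + item[0],
                                 "\n<|end|>\n<|assistant|>\n" + item[1]])
                        for item in history_transformer_format])
    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
    # timeout=300.0: iterating the streamer raises queue.Empty if no new
    # token arrives within 300 s, so a stalled generation cannot hang the
    # request. skip_prompt/skip_special_tokens keep the echoed prompt and
    # tokens like <|end|> out of the streamed text.
    streamer = TextIteratorStreamer(
        tokenizer, timeout=300.0, skip_prompt=True, skip_special_tokens=True
    )
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=max_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        stopping_criteria=StoppingCriteriaList([stop]),
    )
    # model.generate blocks until generation finishes, so it runs on a
    # worker thread while this generator yields partial text to the UI.
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        yield partial_message
```

On a ZeroGPU Space, @spaces.GPU(duration=60) requests the GPU for roughly 60 seconds per call, so the generous 300-second streamer timeout is a consumer-side safety net rather than the expected generation length.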