first commit
app.py CHANGED
@@ -1,5 +1,31 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
+from unsloth import FastLanguageModel
+from transformers import TextStreamer
+
+# Load the model and tokenizer with Unsloth. The checkpoint name below is a
+# placeholder assumption — substitute the model this Space actually serves.
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "unsloth/llama-3-8b-Instruct-bnb-4bit",  # assumed placeholder
+    max_seq_length = 2048,
+    load_in_4bit = True,
+)
+
+FastLanguageModel.for_inference(model)  # Enable native 2x faster inference
+
+messages = [
+    {"role": "user", "content": "Continue the Fibonacci sequence: 1, 1, 2, 3, 5, 8,"},
+]
+inputs = tokenizer.apply_chat_template(
+    messages,
+    tokenize = True,
+    add_generation_prompt = True,  # Must add for generation
+    return_tensors = "pt",
+).to("cuda")
+
+text_streamer = TextStreamer(tokenizer, skip_prompt = True)
+_ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 128,
+                   use_cache = True, temperature = 1.5, min_p = 0.1)
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
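
Note that the commit imports `gradio` but never connects the model to an interface. Below is a minimal sketch of how the streamed generation above could be wired into a Gradio UI; the `respond` function, the switch to `TextIteratorStreamer`, and the interface wiring are illustrative assumptions, not part of this commit. It presumes `model` and `tokenizer` are loaded as in the diff.

import gradio as gr
from threading import Thread
from transformers import TextIteratorStreamer

# Illustrative glue code (an assumption, not part of the commit): stream tokens
# from the model into a Gradio textbox. `model` and `tokenizer` are assumed to
# be loaded via FastLanguageModel.from_pretrained as in the diff above.
def respond(message):
    messages = [{"role": "user", "content": message}]
    inputs = tokenizer.apply_chat_template(
        messages,
        tokenize = True,
        add_generation_prompt = True,
        return_tensors = "pt",
    ).to("cuda")
    # TextIteratorStreamer exposes generated text as an iterator, so generation
    # runs in a background thread while we yield partial output to Gradio.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt = True)
    Thread(target = model.generate, kwargs = dict(
        input_ids = inputs, streamer = streamer, max_new_tokens = 128,
        use_cache = True, temperature = 1.5, min_p = 0.1,
    )).start()
    partial = ""
    for chunk in streamer:
        partial += chunk
        yield partial  # each yield updates the output box with the text so far

demo = gr.Interface(fn = respond, inputs = "text", outputs = "text")
demo.launch()

`TextIteratorStreamer` is used here instead of the commit's `TextStreamer` because `TextStreamer` prints to stdout, which would not appear in the web UI.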