Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -1,9 +1,7 @@
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
3 |
|
4 |
-
client = InferenceClient(
|
5 |
-
"mistralai/Mixtral-8x7B-Instruct-v0.1"
|
6 |
-
)
|
7 |
|
8 |
# Formats the prompt to hold all of the past messages
|
9 |
def format_prompt(message, history):
|
@@ -40,12 +38,9 @@ def format_prompt_grammar(message, history):
|
|
40 |
|
41 |
|
42 |
def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
|
43 |
-
print(f"\n\nSystem Prompt: '{system_prompt}'")
|
44 |
-
|
45 |
temperature = float(temperature)
|
46 |
if temperature < 1e-2: temperature = 1e-2
|
47 |
top_p = float(top_p)
|
48 |
-
|
49 |
|
50 |
generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42,)
|
51 |
|
@@ -70,6 +65,7 @@ additional_inputs=[
|
|
70 |
gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
|
71 |
gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
|
72 |
gr.Slider( label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens", )
|
|
|
73 |
]
|
74 |
|
75 |
examples=['Give me the grammatically correct version of the sentence: "We shood buy an car"', "Give me an example exam question testing students on square roots on basic integers", "Would this block of HTML code run?\n```\n\n```"]
|
|
|
1 |
from huggingface_hub import InferenceClient
|
2 |
import gradio as gr
|
3 |
|
4 |
+
# Serverless Inference API client bound to the Mixtral-8x7B instruct model
# (model id passed positionally; no token here, so it relies on the Space's
# ambient HF credentials — TODO confirm rate limits apply to anonymous use)
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
|
|
|
|
|
5 |
|
6 |
# Formats the prompt to hold all of the past messages
|
7 |
def format_prompt(message, history):
|
|
|
38 |
|
39 |
|
40 |
def generate(prompt, history, system_prompt, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
|
|
|
|
|
41 |
temperature = float(temperature)
|
42 |
if temperature < 1e-2: temperature = 1e-2
|
43 |
top_p = float(top_p)
|
|
|
44 |
|
45 |
generate_kwargs = dict(temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, do_sample=True, seed=42,)
|
46 |
|
|
|
65 |
gr.Slider( label="Max new tokens", value=256, minimum=0, maximum=1048, step=64, interactive=True, info="The maximum numbers of new tokens", ),
|
66 |
gr.Slider( label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens", ),
|
67 |
gr.Slider( label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens", )
|
68 |
+
gr.Button
|
69 |
]
|
70 |
|
71 |
# Example prompts surfaced in the Gradio UI: a grammar-correction request,
# an exam-question generation request, and an HTML-validity question whose
# code fence is intentionally empty.
examples=['Give me the grammatically correct version of the sentence: "We shood buy an car"', "Give me an example exam question testing students on square roots on basic integers", "Would this block of HTML code run?\n```\n\n```"]
|