Spaces:
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,12 +1,8 @@
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
|
4 |
-
|
5 |
-
#For more information on huggingface_hub Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
|
6 |
-
|
7 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
8 |
|
9 |
-
|
10 |
def respond(
|
11 |
message,
|
12 |
history: list[tuple[str, str]],
|
@@ -40,12 +36,23 @@ def respond(
|
|
40 |
yield response
|
41 |
|
42 |
|
43 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
44 |
|
45 |
demo = gr.ChatInterface(
|
46 |
respond,
|
|
|
47 |
additional_inputs=[
|
48 |
-
|
49 |
gr.Slider(minimum=256, maximum=8192, value=512, step=1, label="Max Gen tokens"),
|
50 |
gr.Slider(minimum=0.3, maximum=2.5, value=0.8, step=0.1, label="Creativity"),
|
51 |
gr.Slider(
|
|
|
1 |
import gradio as gr
|
2 |
from huggingface_hub import InferenceClient
|
3 |
|
|
|
|
|
|
|
4 |
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
5 |
|
|
|
6 |
def respond(
|
7 |
message,
|
8 |
history: list[tuple[str, str]],
|
|
|
36 |
yield response
|
37 |
|
38 |
|
39 |
+
# Add a title to the UI
|
40 |
+
title = "Corenet"
|
41 |
+
|
42 |
+
# Modify the pre-prompt to be editable but greyed out
|
43 |
+
pre_prompt = gr.Textbox(
|
44 |
+
value="You are a friendly Chatbot, and you are a finetuned version of Llama-3 8B made possible by HX",
|
45 |
+
label="Pre-prompt",
|
46 |
+
interactive=True,
|
47 |
+
placeholder="Type here...",
|
48 |
+
style={"color": "grey"}
|
49 |
+
)
|
50 |
|
51 |
demo = gr.ChatInterface(
|
52 |
respond,
|
53 |
+
title=title,
|
54 |
additional_inputs=[
|
55 |
+
pre_prompt,
|
56 |
gr.Slider(minimum=256, maximum=8192, value=512, step=1, label="Max Gen tokens"),
|
57 |
gr.Slider(minimum=0.3, maximum=2.5, value=0.8, step=0.1, label="Creativity"),
|
58 |
gr.Slider(
|