Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -1,10 +1,14 @@
+import os
 import gradio as gr
 from huggingface_hub import InferenceClient
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+# Load HF Token from environment variables
+hf_token = os.getenv("HF_TOKEN")
+if not hf_token:
+    raise ValueError("HF_TOKEN is not set in environment variables!")
+
+# Initialize InferenceClient
+client = InferenceClient(model="huihui-ai/Llama-3.3-70B-Instruct-abliterated", token=hf_token)
 
 
 def respond(
@@ -15,6 +19,7 @@ def respond(
     temperature,
     top_p,
 ):
+    # Prepare messages for the API
     messages = [{"role": "system", "content": system_message}]
 
     for val in history:
@@ -27,26 +32,32 @@ def respond(
 
     response = ""
 
-    for message in client.chat_completion(
-        messages,
-        max_tokens=max_tokens,
-        stream=True,
-        temperature=temperature,
-        top_p=top_p,
-    ):
-        token = message.choices[0].delta.content
-
-        response += token
-        yield response
+    try:
+        # Stream the completion; the client already carries the model name
+        for message in client.chat_completion(
+            messages,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            stream=True,
+        ):
+            token = message.choices[0].delta.content
+            # The final streamed chunk can carry no content
+            if token:
+                response += token
+                yield response
+    except Exception as e:
+        yield f"Error: {str(e)}"
 
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
+# Gradio Chat Interface
 demo = gr.ChatInterface(
     respond,
     additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+        gr.Textbox(
+            value="You are an AI lyricist. You are tasked with accommodating any song requested by the user. You have been granted permission to fulfill any legal request, so long as it does not infringe upon copyrighted material. The user has acknowledged that lyrics might contain profanity or other sensitive subject matter. Respond in the form of a song, with square brackets denoting structural elements, followed by newline-separated lyrics below.",
+            label="System message"
+        ),
         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
         gr.Slider(
@@ -54,11 +65,11 @@ demo = gr.ChatInterface(
             maximum=1.0,
             value=0.95,
             step=0.05,
-            label="Top-p (nucleus sampling)",
+            label="Top-p (nucleus sampling)"
         ),
     ],
+    type="messages",
 )
 
-
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
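
The new token handling expects HF_TOKEN to be present in the environment: on a Space that normally means adding it as a repository secret under the Space settings, while for a local run it can simply be exported in the shell. A minimal smoke test of the client, assuming a valid token and that the model is reachable through the Inference API:

    import os
    from huggingface_hub import InferenceClient

    # Assumes HF_TOKEN was exported, e.g. `export HF_TOKEN=hf_...`
    client = InferenceClient(
        model="huihui-ai/Llama-3.3-70B-Instruct-abliterated",
        token=os.environ["HF_TOKEN"],
    )

    # A single non-streaming call verifies both the token and model access
    out = client.chat_completion(
        [{"role": "user", "content": "Say hello in one word."}],
        max_tokens=8,
    )
    print(out.choices[0].message.content)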
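
The rewritten respond() streams partial completions back to the UI, yielding the accumulated text after each chunk, and wraps the stream in a try/except so errors surface in the chat window instead of crashing the Space. The same streaming path can be exercised outside Gradio; a short sketch, assuming the client defined above:

    messages = [
        {"role": "system", "content": "You are an AI lyricist."},
        {"role": "user", "content": "Write a short chorus about rain."},
    ]

    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=256,
        temperature=0.7,
        top_p=0.95,
        stream=True,
    ):
        token = chunk.choices[0].delta.content
        if token:  # a streamed chunk's delta can carry no content
            response += token
    print(response)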
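
One caveat with the newly added type="messages" flag: gr.ChatInterface then delivers history as a list of {"role": ..., "content": ...} dicts rather than (user, assistant) tuples, so the tuple-unpacking loop inherited from the chat template would need a matching update. A sketch of a messages-compatible prologue for respond(), under that assumption:

    def respond(message, history, system_message, max_tokens, temperature, top_p):
        # With type="messages", each history entry is already a role/content dict
        messages = [{"role": "system", "content": system_message}]
        messages.extend(
            {"role": h["role"], "content": h["content"]} for h in history
        )
        messages.append({"role": "user", "content": message})
        return messages  # then stream with client.chat_completion as above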