Update app.py

Space status: Runtime error
app.py
CHANGED
```diff
@@ -18,8 +18,6 @@ def create_chat_app():
         "max_tokens_label": "Maximum Tokens",
         "temperature_label": "Temperature",
         "top_p_label": "Top-p (Nucleus Sampling)",
-        "chat_title": "Chat with Llama 3.3 70B",
-        "chat_description": "An interactive chatbot using the Llama 3.3 70B Instruct model.",
         "info_section": """
         ### ℹ️ Information
         - Model: Llama 3.3 70B Instruct
```
```diff
@@ -50,8 +48,6 @@ def create_chat_app():
         "max_tokens_label": "Máximo de Tokens",
         "temperature_label": "Temperatura",
         "top_p_label": "Top-p (Amostragem Nucleus)",
-        "chat_title": "Chat com Llama 3.3 70B",
-        "chat_description": "Um chatbot interativo usando o modelo Llama 3.3 70B Instruct.",
         "info_section": """
         ### ℹ️ Informações
         - Modelo: Llama 3.3 70B Instruct
```
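Both language blocks lose their `chat_title` and `chat_description` entries, since the component that consumed them is replaced in the last hunk. For context, `TRANSLATIONS` maps a language code to the UI strings used throughout the file. A trimmed sketch of its assumed shape, with keys taken from this diff and values abbreviated:

```python
# Assumed structure of TRANSLATIONS; key names come from the diff,
# the "..." values are placeholders, not the commit's actual strings.
TRANSLATIONS = {
    "en": {
        "title": "...",                  # shown in title_md
        "description": "...",            # shown in description_md
        "system_message": "...",         # default system prompt
        "system_message_label": "...",
        "max_tokens_label": "Maximum Tokens",
        "temperature_label": "Temperature",
        "top_p_label": "Top-p (Nucleus Sampling)",
        "info_section": "### ℹ️ Information ...",
        "examples": ["..."],             # seed prompts for gr.Examples
        "error_message": "... {} ...",   # formatted with the exception text
    },
    "pt": {
        # same keys, Portuguese strings
    },
}
```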
```diff
@@ -73,25 +69,20 @@ def create_chat_app():
 
     def respond(
         message: str,
-        history,
+        chat_history: List[Tuple[str, str]],
         system_message: str,
         max_tokens: int,
         temperature: float,
         top_p: float,
         language: str,
     ) -> str:
-        """
-        Process user message and generate a response using the Llama 3.3 70B model.
-        """
         try:
             client = Client("aifeifei798/feifei-chat")
 
             formatted_message = f"{system_message}\n\nConversation history:\n"
-            for user, assistant in history:
-                if user:
-                    formatted_message += f"User: {user}\n"
-                if assistant:
-                    formatted_message += f"Assistant: {assistant}\n"
+            for user, assistant in chat_history:
+                formatted_message += f"User: {user}\n"
+                formatted_message += f"Assistant: {assistant}\n"
 
             formatted_message += f"User: {message}"
 
```
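Two notes on this hunk. The new `chat_history: List[Tuple[str, str]]` annotation needs `List` and `Tuple` from `typing`, assuming they are not already imported higher up in the file. More importantly, the last hunk below creates the `gr.Chatbot` with `type="messages"`, and in that mode Gradio passes history as a list of `{"role", "content"}` dicts, so unpacking `(user, assistant)` tuples here would raise a `ValueError` on the first submit. A hedged sketch of a prompt builder that matches the messages format; `build_prompt` is a hypothetical helper, not part of this commit:

```python
from typing import Dict, List

def build_prompt(system_message: str,
                 chat_history: List[Dict[str, str]],
                 message: str) -> str:
    """Flatten a messages-format Gradio history into the prompt respond() builds."""
    formatted = f"{system_message}\n\nConversation history:\n"
    for turn in chat_history:
        # With type="messages", each entry is {"role": "user"|"assistant", "content": ...}
        speaker = "User" if turn["role"] == "user" else "Assistant"
        formatted += f"{speaker}: {turn['content']}\n"
    return formatted + f"User: {message}"
```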
```diff
@@ -114,9 +105,6 @@ def create_chat_app():
             return TRANSLATIONS[language]["error_message"].format(str(e))
 
     def update_interface(language: str):
-        """
-        Update the interface language based on user selection
-        """
         trans = TRANSLATIONS[language]
         return (
             trans["title"],
```
```diff
@@ -126,10 +114,7 @@ def create_chat_app():
             trans["max_tokens_label"],
             trans["temperature_label"],
             trans["top_p_label"],
-            trans["chat_title"],
-            trans["chat_description"],
-            trans["info_section"],
-            trans["examples"]
+            trans["info_section"]
         )
 
     with gr.Blocks(theme=gr.themes.Soft()) as demo:
```
```diff
@@ -142,72 +127,77 @@ def create_chat_app():
         )
 
         # Dynamic content containers
-        title_md = gr.Markdown()
-        description_md = gr.Markdown()
-        info_md = gr.Markdown()
+        title_md = gr.Markdown(TRANSLATIONS["en"]["title"])
+        description_md = gr.Markdown(TRANSLATIONS["en"]["description"])
 
-        chatbot = gr.ChatInterface(
-            respond,
-            additional_inputs=[
-            gr.Textbox(
-                value=TRANSLATIONS["en"]["system_message"],
-                label="System Message"
-            ),
-            gr.Slider(
+        with gr.Group():
+            chatbot = gr.Chatbot(
+                value=[],
+                type="messages",  # Using the new messages format
+                label=TRANSLATIONS["en"]["title"]
+            )
+            msg = gr.Textbox(
+                label="Message",
+                placeholder="Type your message here...",
+                lines=3
+            )
+
+        with gr.Row():
+            system_msg = gr.Textbox(
+                value=TRANSLATIONS["en"]["system_message"],
+                label=TRANSLATIONS["en"]["system_message_label"]
+            )
+            max_tokens = gr.Slider(
                 minimum=1,
                 maximum=4096,
                 value=2048,
                 step=1,
-                label="Maximum Tokens"
-            ),
-            gr.Slider(
+                label=TRANSLATIONS["en"]["max_tokens_label"]
+            )
+            temperature = gr.Slider(
                 minimum=0.1,
                 maximum=2.0,
                 value=0.7,
                 step=0.1,
-                label="Temperature"
-            ),
-            gr.Slider(
+                label=TRANSLATIONS["en"]["temperature_label"]
+            )
+            top_p = gr.Slider(
                 minimum=0.1,
                 maximum=1.0,
                 value=0.95,
                 step=0.05,
-                label="Top-p (Nucleus Sampling)"
-            ),
-            language
-            ],
-            examples=TRANSLATIONS["en"]["examples"]
+                label=TRANSLATIONS["en"]["top_p_label"]
+            )
+
+        info_md = gr.Markdown(TRANSLATIONS["en"]["info_section"])
+
+        # Set up chat functionality
+        msg.submit(
+            respond,
+            [msg, chatbot, system_msg, max_tokens, temperature, top_p, language],
+            [chatbot],
+            clear_input=True
        )
 
-        # Update interface when language changes
-        language.change(
-            fn=update_interface,
-            inputs=[language],
-            outputs=[
-                title_md,
-                description_md,
-                chatbot.textbox,
-                *[input.label for input in chatbot.additional_inputs[:-1]],  # Exclude language selector
-                chatbot.title,
-                chatbot.description,
-                info_md,
-                chatbot.examples
-            ]
+        # Examples
+        gr.Examples(
+            examples=TRANSLATIONS["en"]["examples"],
+            inputs=msg
         )
 
-        # Initialize the interface with the default language
-        demo.load(
+        # Update interface when language changes
+        language.change(
             fn=update_interface,
             inputs=[language],
             outputs=[
                 title_md,
                 description_md,
-                chatbot.textbox,
-                *[input.label for input in chatbot.additional_inputs[:-1]],  # Exclude language selector
-                chatbot.title,
-                chatbot.description,
-                info_md,
-                chatbot.examples
+                system_msg,
+                system_msg.label,
+                max_tokens.label,
+                temperature.label,
+                top_p.label,
+                info_md
             ]
         )
 
```
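This last hunk is a likely source of the Space's "Runtime error" status: Gradio event listeners such as `Textbox.submit` accept no `clear_input` keyword, so the `msg.submit(...)` call would raise a `TypeError` while the Blocks graph is built. The usual pattern is to chain a `.then()` step that empties the textbox. A second issue: with `[chatbot]` as the output, the handler must return the full message list, while `respond` returns only the reply string. A hedged sketch under those assumptions; `respond_and_append` is a hypothetical wrapper, not in the commit:

```python
def respond_and_append(message, history, system_message,
                       max_tokens, temperature, top_p, language):
    # Call the commit's respond() for the reply text, then extend the
    # messages-format history that the Chatbot component expects back.
    reply = respond(message, history, system_message,
                    max_tokens, temperature, top_p, language)
    return history + [
        {"role": "user", "content": message},
        {"role": "assistant", "content": reply},
    ]

msg.submit(
    respond_and_append,
    [msg, chatbot, system_msg, max_tokens, temperature, top_p, language],
    [chatbot],
).then(lambda: "", None, msg)  # clear the input box after the reply arrives
```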
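The `language.change` wiring has a related flaw: event `outputs` must be Gradio components, and entries like `system_msg.label` or `max_tokens.label` are plain strings at wiring time, so the label changes would never reach the UI. The standard pattern is to target the components themselves and return `gr.update(...)` values from `update_interface`. A minimal sketch, assuming the TRANSLATIONS keys that appear elsewhere in this diff:

```python
def update_interface(language: str):
    trans = TRANSLATIONS[language]
    return (
        trans["title"],                                  # title_md (Markdown text)
        trans["description"],                            # description_md
        gr.update(value=trans["system_message"],
                  label=trans["system_message_label"]),  # system_msg
        gr.update(label=trans["max_tokens_label"]),      # max_tokens
        gr.update(label=trans["temperature_label"]),     # temperature
        gr.update(label=trans["top_p_label"]),           # top_p
        trans["info_section"],                           # info_md
    )

language.change(
    fn=update_interface,
    inputs=[language],
    outputs=[title_md, description_md, system_msg,
             max_tokens, temperature, top_p, info_md],
)
```

With this shape, each value returned by `update_interface` lines up one-to-one with a component in `outputs`, which is what Gradio requires.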