Reality123b committed on
Commit
bc34025
·
verified ·
1 Parent(s): 4fa6f85

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +190 -96
app.py CHANGED
@@ -156,108 +156,202 @@ def romanized_to_bengali(text: str) -> str:
156
 
157
  return text_lower
158
 
159
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Generate a chat reply for *message* as a generator.

    Handles, in order: canned custom responses, image-generation
    requests, and regular text chat (with best-effort translation of
    non-English input before the model call and back-translation of the
    reply afterwards).

    Args:
        message: Latest user message.
        history: Prior (user, assistant) message pairs.
        system_message: System prompt passed to the model.
        max_tokens: Cap on generated tokens.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Yields:
        str: The reply text (a single final string).
    """
    # Canned responses short-circuit everything else.
    custom_response = check_custom_responses(message)
    if custom_response:
        yield custom_response
        return

    # Image-generation requests bypass the chat model entirely.
    if is_image_request(message):
        try:
            image = generate_image(message)
            if image:
                yield f"Here's your generated image based on: {message}"
                # NOTE(review): actual image display still needs to be
                # wired into the Gradio interface.
            else:
                yield "Sorry, I couldn't generate the image. Please try again."
        except Exception as e:
            yield f"An error occurred while generating the image: {str(e)}"
        return

    # Handle translation with a conservative approach (see translate_text).
    translated_msg, original_lang, was_transliterated = translate_text(message)

    # Build the conversation; only translate longer history entries to
    # avoid mangling short interjections ("ok", "thanks", ...).
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            if len(user_msg.split()) > 2:
                trans_user_msg, _, _ = translate_text(user_msg)
                messages.append({"role": "user", "content": trans_user_msg})
            else:
                messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    messages.append({"role": "user", "content": translated_msg})

    # Accumulate the streamed completion.  BUGFIX: the loop variable no
    # longer shadows `message`; previously the post-loop back-translation
    # check called .split() on the last stream-chunk object instead of
    # the original user message.
    response = ""
    for chunk in text_client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # the final chunk's delta content can be None
            response += token

    # Only translate back if the original was definitely non-English.
    if original_lang != 'en' and len(message.split()) > 2:
        try:
            translator = GoogleTranslator(source='en', target=original_lang)
            yield translator.translate(response)
        except Exception:
            # Best-effort: fall back to the untranslated reply.
            yield response
    else:
        yield response
229
 
230
# Gradio chat UI wired to respond(); the controls below are forwarded to
# respond() as additional inputs, in this exact order.
_extra_controls = [
    gr.Textbox(
        value="You are a friendly Chatbot who always responds in English unless the user specifically uses another language.",
        label="System message",
    ),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
]

demo = gr.ChatInterface(respond, additional_inputs=_extra_controls)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
 
262
if __name__ == "__main__":
    # share=True exposes a temporary public Gradio link in addition to
    # the local server.
    demo.launch(share=True)
 
 
156
 
157
  return text_lower
158
 
159
def create_chat_interface():
    """Build and return the Gradio Blocks chat UI.

    Wires a chatbot window, text/voice input, a (hidden) image output,
    and a collapsible settings panel to the module-level helpers
    (check_custom_responses, is_image_request, generate_image,
    translate_text, respond).

    Returns:
        gr.Blocks: the assembled (not yet launched) interface.
    """
    # Custom CSS for better styling
    custom_css = """
    .container {
        max-width: 850px !important;
        margin: auto;
    }
    .chat-window {
        height: 600px !important;
        overflow-y: auto;
        border-radius: 15px !important;
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1) !important;
    }
    .chat-message {
        padding: 1rem !important;
        margin: 0.5rem !important;
        border-radius: 10px !important;
    }
    .user-message {
        background-color: #e3f2fd !important;
    }
    .bot-message {
        background-color: #f5f5f5 !important;
    }
    .settings-block {
        padding: 1rem !important;
        background-color: #ffffff !important;
        border-radius: 10px !important;
        margin-top: 1rem !important;
    }
    """

    # Create the interface with custom theme
    with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
        # Header
        with gr.Row():
            gr.HTML("""
                <div style="text-align: center; margin-bottom: 1rem">
                    <h1 style="font-size: 2.5rem; font-weight: 600; color: #1a237e">Xylaria Chat</h1>
                    <p style="color: #666">Your AI Assistant for Multiple Languages</p>
                </div>
            """)

        # Main chat interface
        with gr.Row():
            with gr.Column(scale=4):
                chatbot = gr.Chatbot(
                    height=500,
                    show_label=False,
                    container=True,
                    elem_classes=["chat-window"]
                )

                # Input area with buttons
                with gr.Row():
                    txt = gr.Textbox(
                        show_label=False,
                        placeholder="Type your message here...",
                        container=False
                    )
                    send_btn = gr.Button("Send", variant="primary")
                    clear_btn = gr.Button("Clear")

                # Additional features bar
                with gr.Row():
                    audio_input = gr.Audio(source="microphone", type="filepath", label="Voice Input")
                    image_output = gr.Image(label="Generated Image", visible=False)

        # Settings panel (collapsible)
        with gr.Accordion("Advanced Settings", open=False):
            with gr.Row():
                with gr.Column():
                    system_msg = gr.Textbox(
                        value="You are a friendly Chatbot who always responds in English unless the user specifically uses another language.",
                        label="System Message",
                        lines=2
                    )
                    max_tokens = gr.Slider(
                        minimum=1,
                        maximum=2048,
                        value=512,
                        step=1,
                        label="Max Tokens"
                    )
                with gr.Column():
                    temperature = gr.Slider(
                        minimum=0.1,
                        maximum=4.0,
                        value=0.7,
                        step=0.1,
                        label="Temperature"
                    )
                    top_p = gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.95,
                        step=0.05,
                        label="Top-p (nucleus sampling)"
                    )

        def user_message(message, history):
            """Append the user's message to history and clear the textbox."""
            if message:
                return "", history + [[message, None]]
            return "", history

        def bot_response(history, system_msg, max_tokens, temperature, top_p):
            """Fill in the assistant side of the last history entry.

            This function contains `yield`, so it is a generator; Gradio
            re-renders the chatbot on every yield.  BUGFIX: the original
            used `return history` in the empty-history, custom-response
            and image branches — inside a generator that never emits a
            value, so those replies were silently dropped.  Every exit
            path now yields before returning.
            """
            if not history:
                yield history
                return

            # Get the last user message
            message = history[-1][0]

            # Canned responses short-circuit the model call.
            custom_response = check_custom_responses(message)
            if custom_response:
                history[-1][1] = custom_response
                yield history
                return

            # Image generation requests bypass the chat model.
            if is_image_request(message):
                try:
                    image = generate_image(message)
                    if image:
                        history[-1][1] = "Here's your generated image!"
                        # NOTE(review): image display still needs wiring
                        # to image_output.
                    else:
                        # BUGFIX: previously fell through to the text
                        # model with no reply set.
                        history[-1][1] = "Sorry, I couldn't generate the image. Please try again."
                except Exception as e:
                    history[-1][1] = f"Sorry, I couldn't generate the image: {str(e)}"
                yield history
                return

            # Handle regular text responses.
            try:
                translated_msg, original_lang, was_transliterated = translate_text(message)
                # NOTE(review): respond() appears to translate internally
                # as well — confirm the double translation is intended.
                response = respond(
                    translated_msg,
                    history[:-1],
                    system_msg,
                    max_tokens,
                    temperature,
                    top_p
                )

                # Stream the response into the chat window.
                partial_response = ""
                for chunk in response:
                    partial_response += chunk
                    history[-1][1] = partial_response
                    yield history
                    time.sleep(0.02)  # slight delay for smooth streaming

            except Exception as e:
                history[-1][1] = f"An error occurred: {str(e)}"
                yield history

        # Event handlers: Enter in the textbox or the Send button.
        txt_msg = txt.submit(
            user_message,
            [txt, chatbot],
            [txt, chatbot],
            queue=False
        ).then(
            bot_response,
            [chatbot, system_msg, max_tokens, temperature, top_p],
            chatbot
        )

        send_btn.click(
            user_message,
            [txt, chatbot],
            [txt, chatbot],
            queue=False
        ).then(
            bot_response,
            [chatbot, system_msg, max_tokens, temperature, top_p],
            chatbot
        )

        clear_btn.click(lambda: None, None, chatbot, queue=False)

        # Handle voice input
        def process_audio(audio_file):
            # Add your audio transcription logic here
            return "Audio input received! (Add your transcription logic)"

        audio_input.change(
            process_audio,
            inputs=[audio_input],
            outputs=[txt]
        )

    return demo
351
# Create and launch the interface
demo = create_chat_interface()

if __name__ == "__main__":
    # share=True exposes a temporary public Gradio link in addition to
    # the local server.
    demo.launch(share=True)
357
+