Reality123b committed on
Commit 04a9af6 · verified · 1 Parent(s): 65a6bd0

Update app.py

Files changed (1)
  1. app.py +97 -224
app.py CHANGED
@@ -156,236 +156,109 @@ def romanized_to_bengali(text: str) -> str:

    return text_lower

- def create_chat_interface():
-     # Custom CSS for better styling
-     custom_css = """
-     body {
-         font-family: 'Inter', sans-serif;
-     }
-
-     .chat-container {
-         padding-top: 0;
-         padding-bottom: 0;
-     }
-
-     .chat-messages {
-         scroll-behavior: smooth;
-     }
-
-     .input-container {
-         border-top: 1px solid #ccc;
-     }
-
-     .input-container textarea {
-         border-radius: 12px 0 0 12px;
-     }
-
-     .input-container button {
-         border-radius: 0 12px 12px 0;
-     }
-
-     .loading {
-         animation: pulse 1.5s ease-in-out infinite;
-     }
-
-     @keyframes pulse {
-         0% { opacity: 1; }
-         50% { opacity: 0.5; }
-         100% { opacity: 1; }
-     }
-     """
-
-     # Create the interface with custom theme
-     with gr.Blocks(css=custom_css, theme=gr.themes.Soft()) as demo:
-         # Header
-         with gr.Row():
-             gr.HTML("""
-             <!DOCTYPE html>
-             <html lang="en">
-             <head>
-                 <meta charset="UTF-8">
-                 <meta name="viewport" content="width=device-width, initial-scale=1.0">
-                 <title>Xylaria Chat</title>
-                 <link rel="stylesheet" href="styles.css">
-                 <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/full.css">
-                 <script src="https://cdn.jsdelivr.net/npm/[email protected]/dist/vue.js"></script>
-             </head>
-             <body>
-                 <div id="app" class="mx-auto max-w-5xl">
-                     <header class="bg-white rounded shadow-lg p-10 text-center mb-10">
-                         <h1 class="text-6xl font-bold text-violet-600 mb-5">✨ Xylaria Chat</h1>
-                         <p class="text-lg font-medium text-gray-600">Your Intelligent Multilingual Assistant</p>
-                     </header>
-                     <section class="bg-white rounded shadow-lg p-10 chat-container">
-                         <div class="chat-messages overflow-y-auto max-h-screen">
-                             <div v-for="(message, index) in messages" :key="index" class="my-4" :class="{ 'flex justify-end': message.type === 'user' }">
-                                 <div class="rounded-lg py-4 px-6" :class="{ 'bg-violet-500 text-white': message.type === 'user', 'bg-gray-200': message.type === 'bot' }">{{ message.text }}</div>
-                             </div>
-                         </div>
-                         <div class="input-container flex mt-6">
-                             <textarea v-model="inputText" class="w-full p-4 rounded-lg border-2 border-gray-400 resize-y" rows="3" placeholder="Type a message..."></textarea>
-                             <button @click="sendMessage" class="bg-violet-500 text-white p-3 rounded-lg ml-4 hover:bg-violet-600 transition duration-300">Send</button>
-                         </div>
-                     </section>
-                 </div>
-                 <script src="script.js"></script>
-             </body>
-             </html>
-             """)
-
-         # Main chat interface
-         with gr.Row():
-             with gr.Column(scale=4):
-                 chatbot = gr.Chatbot(
-                     height=500,
-                     show_label=False,
-                     container=True,
-                     elem_classes=["chat-window"]
-                 )
-
-         # Input area with buttons
-         with gr.Row():
-             txt = gr.Textbox(
-                 show_label=False,
-                 placeholder="Type your message here...",
-                 container=False
-             )
-             send_btn = gr.Button("Send", variant="primary")
-             clear_btn = gr.Button("Clear")
-
-         # Additional features bar
-         with gr.Row():
-             audio_input = gr.Audio(source="microphone", type="filepath", label="Voice Input")
-             image_output = gr.Image(label="Generated Image", visible=False)
-
-         # Settings panel (collapsible)
-         with gr.Accordion("Advanced Settings", open=False):
-             with gr.Row():
-                 with gr.Column():
-                     system_msg = gr.Textbox(
-                         value="You are a friendly Chatbot who always responds in English unless the user specifically uses another language.",
-                         label="System Message",
-                         lines=2
-                     )
-                     max_tokens = gr.Slider(
-                         minimum=1,
-                         maximum=2048,
-                         value=512,
-                         step=1,
-                         label="Max Tokens"
-                     )
-                 with gr.Column():
-                     temperature = gr.Slider(
-                         minimum=0.1,
-                         maximum=4.0,
-                         value=0.7,
-                         step=0.1,
-                         label="Temperature"
-                     )
-                     top_p = gr.Slider(
-                         minimum=0.1,
-                         maximum=1.0,
-                         value=0.95,
-                         step=0.05,
-                         label="Top-p (nucleus sampling)"
-                     )
-
-         # Function to handle sending messages
-         def user_message(message, history):
-             if message:
-                 return "", history + [[message, None]]
-             return "", history
-
-         def bot_response(history, system_msg, max_tokens, temperature, top_p):
-             if len(history) == 0:
-                 return history
-
-             # Get the last user message
-             message = history[-1][0]
-
-             # Check for custom responses first
-             custom_response = check_custom_responses(message)
-             if custom_response:
-                 history[-1][1] = custom_response
-                 return history
-
-             # Check for image generation request
-             if is_image_request(message):
-                 try:
-                     image = generate_image(message)
-                     if image:
-                         history[-1][1] = "Here's your generated image!"
-                     # Handle image display logic
-                     return history
-                 except Exception as e:
-                     history[-1][1] = f"Sorry, I couldn't generate the image: {str(e)}"
-                     return history
-
-             # Handle regular text responses
-             try:
-                 translated_msg, original_lang, was_transliterated = translate_text(message)
-                 response = respond(
-                     translated_msg,
-                     history[:-1],
-                     system_msg,
-                     max_tokens,
-                     temperature,
-                     top_p
-                 )
-
-                 # Stream the response
-                 partial_response = ""
-                 for chunk in response:
-                     partial_response += chunk
-                     history[-1][1] = partial_response
-                     yield history
-                     time.sleep(0.02)  # Add slight delay for smooth streaming
-
-             except Exception as e:
-                 history[-1][1] = f"An error occurred: {str(e)}"
-                 yield history
-
-         # Event handlers
-         txt_msg = txt.submit(
-             user_message,
-             [txt, chatbot],
-             [txt, chatbot],
-             queue=False
-         ).then(
-             bot_response,
-             [chatbot, system_msg, max_tokens, temperature, top_p],
-             chatbot
-         )
-
-         send_btn.click(
-             user_message,
-             [txt, chatbot],
-             [txt, chatbot],
-             queue=False
-         ).then(
-             bot_response,
-             [chatbot, system_msg, max_tokens, temperature, top_p],
-             chatbot
-         )
-
-         clear_btn.click(lambda: None, None, chatbot, queue=False)
-
-         # Handle voice input
-         def process_audio(audio_file):
-             # Add your audio transcription logic here
-             return "Audio input received! (Add your transcription logic)"
-
-         audio_input.change(
-             process_audio,
-             inputs=[audio_input],
-             outputs=[txt]
-         )
-
-     return demo

- # Create and launch the interface
- demo = create_chat_interface()

if __name__ == "__main__":
    demo.launch(share=True)
-

    return text_lower

+ def respond(
+     message,
+     history: list[tuple[str, str]],
+     system_message,
+     max_tokens,
+     temperature,
+     top_p,
+ ):
+     # First check for custom responses
+     custom_response = check_custom_responses(message)
+     if custom_response:
+         yield custom_response
+         return
+
+     # Check if this is an image generation request
+     if is_image_request(message):
+         try:
+             image = generate_image(message)
+             if image:
+                 yield f"Here's your generated image based on: {message}"
+                 # You'll need to implement the actual image display logic
+                 # depending on your Gradio interface requirements
+                 return
+             else:
+                 yield "Sorry, I couldn't generate the image. Please try again."
+                 return
+         except Exception as e:
+             yield f"An error occurred while generating the image: {str(e)}"
+             return
+
+     # Handle translation with a more conservative approach
+     translated_msg, original_lang, was_transliterated = translate_text(message)
+
+     # Prepare conversation history - only translate if necessary
+     messages = [{"role": "system", "content": system_message}]
+     for val in history:
+         if val[0]:
+             # Only translate longer messages
+             if len(val[0].split()) > 2:
+                 trans_user_msg, _, _ = translate_text(val[0])
+                 messages.append({"role": "user", "content": trans_user_msg})
+             else:
+                 messages.append({"role": "user", "content": val[0]})
+         if val[1]:
+             messages.append({"role": "assistant", "content": val[1]})
+
+     messages.append({"role": "user", "content": translated_msg})
+
+     # Get response from model
+     response = ""
+     for chunk in text_client.chat_completion(
+         messages,
+         max_tokens=max_tokens,
+         stream=True,
+         temperature=temperature,
+         top_p=top_p,
+     ):
+         token = chunk.choices[0].delta.content
+         if token:
+             response += token
+
+     # Only translate back if the original was definitely non-English
+     if original_lang != 'en' and len(message.split()) > 2:
+         try:
+             translator = GoogleTranslator(source='en', target=original_lang)
+             translated_response = translator.translate(response)
+             yield translated_response
+         except Exception:
+             yield response
+     else:
+         yield response
+
+ # Updated Gradio interface to handle images
+ demo = gr.ChatInterface(
+     respond,
+     additional_inputs=[
+         gr.Textbox(
+             value="You are a friendly Chatbot who always responds in English unless the user specifically uses another language.",
+             label="System message"
+         ),
+         gr.Slider(
+             minimum=1,
+             maximum=2048,
+             value=512,
+             step=1,
+             label="Max new tokens"
+         ),
+         gr.Slider(
+             minimum=0.1,
+             maximum=4.0,
+             value=0.7,
+             step=0.1,
+             label="Temperature"
+         ),
+         gr.Slider(
+             minimum=0.1,
+             maximum=1.0,
+             value=0.95,
+             step=0.05,
+             label="Top-p (nucleus sampling)"
+         ),
+     ]
+ )

if __name__ == "__main__":
    demo.launch(share=True)
+
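
The new respond() generator depends on a text_client and several helpers (check_custom_responses, is_image_request, generate_image, translate_text) that are defined earlier in app.py and do not appear in this hunk. The sketch below is a minimal, hypothetical reading of what those pieces could look like, assuming huggingface_hub.InferenceClient for inference, deep_translator.GoogleTranslator plus langdetect for language handling; the model ids, keyword list, and canned replies are illustrative assumptions, not the repository's actual definitions.

# Hypothetical sketch only; the real helpers live earlier in app.py and may differ.
from huggingface_hub import InferenceClient
from deep_translator import GoogleTranslator
from langdetect import detect

# Assumed clients; the model ids actually used by app.py are not shown in this diff.
text_client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
image_client = InferenceClient("stabilityai/stable-diffusion-2-1")

def check_custom_responses(message):
    """Return a canned reply for known prompts, otherwise None."""
    canned = {"who are you": "I am Xylaria, your multilingual assistant."}
    return canned.get(message.strip().lower())

def is_image_request(message):
    """Rough keyword check for image-generation intent."""
    keywords = ("draw", "generate an image", "create an image", "picture of")
    return any(k in message.lower() for k in keywords)

def generate_image(prompt):
    """Return a PIL image for the prompt, or None if generation fails."""
    try:
        return image_client.text_to_image(prompt)
    except Exception:
        return None

def translate_text(message):
    """Translate to English when needed; return (text, detected_lang, was_transliterated)."""
    try:
        lang = detect(message)
    except Exception:
        lang = "en"
    if lang == "en" or len(message.split()) <= 2:
        return message, lang, False
    translated = GoogleTranslator(source=lang, target="en").translate(message)
    return translated, lang, False

Because respond() is a generator, gr.ChatInterface streams whatever it yields straight to the chat window: the custom-response and image branches yield a single message and return, while the model branch accumulates the streamed chat_completion tokens and yields the finished, optionally back-translated, reply.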