UKURIKIYEYEZU committed on
Commit
8462695
·
verified ·
1 Parent(s): 0c7d227

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +89 -68
app.py CHANGED
@@ -318,6 +318,42 @@ rag_prompt = PromptTemplate.from_template(template)
318
 
319
  retriever = vectorstore.as_retriever()
320
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
321
  class OpenRouterLLM:
322
  def __init__(self, key: str):
323
  try:
@@ -356,7 +392,7 @@ class UserSession:
356
  self.welcome_message = None
357
  self.conversation_history = [] # Add conversation history storage
358
  self.llm = llm # Store the LLM instance
359
-
360
  def set_user(self, user_info):
361
  self.current_user = user_info
362
  self.set_welcome_message(user_info.get("Nickname", "Guest"))
@@ -365,13 +401,12 @@ class UserSession:
365
  self.conversation_history = [
366
  {"role": "assistant", "content": welcome},
367
  ]
368
-
369
  def get_user(self):
370
  return self.current_user
371
-
372
- def set_welcome_message(self, Nickname):
373
  """Set a dynamic welcome message using the OpenRouterLLM."""
374
- # Define a prompt for the LLM to generate a welcome message
375
  prompt = (
376
  f"Create a very brief welcome message for {Nickname} that fits in 3 lines. "
377
  f"The message should: "
@@ -381,28 +416,31 @@ class UserSession:
381
  f"4. Use a tone that is warm, reassuring, and professional. "
382
  f"5. Keep the message concise and impactful, ensuring it fits within the character limit."
383
  )
384
-
385
  # Use the OpenRouterLLM to generate the message
386
- welcome_text = "".join(self.llm.stream(prompt)) # Stream and concatenate the response
387
-
 
388
  # Format the message with HTML styling
389
  self.welcome_message = (
 
 
390
  f"<div style='font-size: 20px;'>"
391
  f"{welcome_text}"
392
  f"</div>"
393
  )
394
-
395
  def get_welcome_message(self):
396
  return self.welcome_message
397
-
398
  def add_to_history(self, role, message):
399
  """Add a message to the conversation history"""
400
  self.conversation_history.append({"role": role, "content": message})
401
-
402
  def get_conversation_history(self):
403
  """Get the full conversation history"""
404
  return self.conversation_history
405
-
406
  def get_formatted_history(self):
407
  """Get conversation history formatted as a string for the LLM"""
408
  formatted_history = ""
@@ -411,11 +449,12 @@ class UserSession:
411
  formatted_history += f"{role}: {entry['content']}\n\n"
412
  return formatted_history
413
 
414
- api_key =api
415
- llm_instance = OpenRouterLLM(key=api_key)
 
416
  user_session = UserSession(llm_instance)
417
 
418
- # Store user details and handle session
419
  def collect_user_info(Nickname):
420
  if not Nickname:
421
  return "Nickname is required to proceed.", gr.update(visible=False), gr.update(visible=True), []
@@ -474,68 +513,53 @@ def create_rag_chain(retriever, template, api_key):
474
 
475
  return stream_func
476
 
477
- def rag_memory_stream(message, history):
478
- # Add user message to history
479
- user_session.add_to_history("user", message)
480
 
481
- # Initialize with empty response
482
- partial_text = ""
483
- full_response = ""
484
-
485
- # Use the rag_chain with the question
486
- for new_text in rag_chain({"question": message}):
487
- partial_text += new_text
488
- full_response = partial_text
489
- yield partial_text
490
 
491
- # After generating the complete response, add it to history
492
- user_session.add_to_history("assistant", full_response)
493
 
494
 
495
- import gradio as gr
496
- api_key = api
497
- # Gradio Interface Setup with improved UX
498
- def chatbot_interface():
499
- # Get API key (in a real application, handle this more securely)
500
- api_key = api # This should be properly defined or imported elsewhere
501
 
502
- # Update the template to include conversation history
503
- # template = """
504
- global template
505
- # template = """
506
- # You are a compassionate and supportive AI assistant specializing in helping individuals affected by Gender-Based Violence (GBV) with emotional awareness.
507
 
508
- # **Previous conversation:**
509
- # {conversation_history}
510
 
511
- # **Context information:**
512
- # {context}
513
 
514
- # **User's Question:** {question}
515
 
516
- # Respond to {first_name} with these guidelines:
 
 
 
 
 
 
 
 
517
 
518
- # 1. **Emotional Awareness**
519
- # - Acknowledge emotions with empathy (e.g., "I understand how you feel")
520
- # - Offer comfort for negative emotions
521
 
522
- # 2. **Communication Approach**
523
- # - Use gentle check-ins when appropriate (e.g., "How are you holding up today, {first_name}?")
524
- # - Occasionally use a warm welcome or check-in (e.g., "Hello {first_name}! How are you feeling today?")
525
- # - Use occasional appropriate emojis (😊, 🤗, ❤️)
526
- # - Keep responses concise unless details are requested
527
- # - Maintain warm but professional tone
528
- # - Refer to {conversation_history} to maintain conversation
529
 
530
- # 3. **Information Handling**
531
- # - Extract relevant details from the {context}
532
- # - Provide links when specifically requested
533
- # - If information isn't available, respond with: "I don't have that information at the moment, {first_name}. 😊"
534
- # - Never generate unsupported content or speculate
535
 
536
- # Your response should be supportive, accurate, and centered on the user's immediate needs.
537
- # """
538
- # Global template for GBV support chatbot
539
  template = """
540
  You are a compassionate and supportive AI assistant specializing in helping individuals affected by Gender-Based Violence (GBV). Your primary goal is to provide emotionally intelligent support while maintaining appropriate boundaries.
541
 
@@ -582,10 +606,7 @@ def chatbot_interface():
582
  Your response should balance emotional support with practical guidance, always centered on {first_name}'s expressed needs and current emotional state.
583
  """
584
 
585
- # # Create the RAG chain with user context
586
- # rag_chain = create_rag_chain(retriever, template, api_key)
587
-
588
- # Create the RAG chain with user context
589
  global rag_chain
590
  rag_chain = create_rag_chain(retriever, template, api_key)
591
 
 
318
 
319
  retriever = vectorstore.as_retriever()
320
 
321
import requests

# Hugging Face Inference API configuration for the NLLB translation model.
API_TOKEN = TOKEN  # NOTE(review): TOKEN must be defined/imported elsewhere in this file — confirm.
model_name = "facebook/nllb-200-distilled-600M"
url = f"https://api-inference.huggingface.co/models/{model_name}"
headers = {
    "Authorization": f"Bearer {API_TOKEN}"
}

def translate_text(text, src_lang, tgt_lang):
    """Translate *text* using the Hugging Face Inference API.

    Parameters
    ----------
    text : str
        The text to translate.
    src_lang, tgt_lang : str
        NLLB language codes (e.g. "eng_Latn", "kin_Latn").

    Returns
    -------
    str
        The translated text, or the original *text* unchanged when the
        request fails for any reason (network error, timeout, non-200
        status, or an unexpected payload shape) — callers rely on always
        getting usable text back.
    """
    # Nothing to translate: skip the network round-trip entirely.
    if not text:
        return text

    try:
        response = requests.post(
            url,
            headers=headers,
            json={
                "inputs": text,
                "parameters": {
                    "src_lang": src_lang,
                    "tgt_lang": tgt_lang,
                },
            },
            # Without a timeout a stalled inference endpoint would hang
            # the whole chat UI indefinitely.
            timeout=30,
        )
    except requests.RequestException as exc:
        # Connection errors / timeouts previously crashed the caller;
        # degrade gracefully to the untranslated text instead.
        print(f"Translation request failed: {exc}")
        return text

    if response.status_code == 200:
        result = response.json()
        # The API returns either a list of result dicts or a single dict;
        # .get() guards against a payload missing 'translation_text',
        # which previously raised KeyError.
        if isinstance(result, list) and result:
            return result[0].get("translation_text", text)
        if isinstance(result, dict):
            return result.get("translation_text", text)
        print(f"Unexpected translation payload: {result!r}")
        return text

    print(f"Translation error: {response.status_code}, {response.text}")
    return text  # Return original text if translation fails
355
+
356
+
357
  class OpenRouterLLM:
358
  def __init__(self, key: str):
359
  try:
 
392
  self.welcome_message = None
393
  self.conversation_history = [] # Add conversation history storage
394
  self.llm = llm # Store the LLM instance
395
+
396
  def set_user(self, user_info):
397
  self.current_user = user_info
398
  self.set_welcome_message(user_info.get("Nickname", "Guest"))
 
401
  self.conversation_history = [
402
  {"role": "assistant", "content": welcome},
403
  ]
404
+
405
  def get_user(self):
406
  return self.current_user
407
+
408
+ def set_welcome_message(self, Nickname, src_lang="eng_Latn", tgt_lang="kin_Latn"):
409
  """Set a dynamic welcome message using the OpenRouterLLM."""
 
410
  prompt = (
411
  f"Create a very brief welcome message for {Nickname} that fits in 3 lines. "
412
  f"The message should: "
 
416
  f"4. Use a tone that is warm, reassuring, and professional. "
417
  f"5. Keep the message concise and impactful, ensuring it fits within the character limit."
418
  )
419
+
420
  # Use the OpenRouterLLM to generate the message
421
+ welcome = "".join(self.llm.stream(prompt)) # Stream and concatenate the response
422
+ welcome_text=translate_text(welcome, src_lang, tgt_lang)
423
+
424
  # Format the message with HTML styling
425
  self.welcome_message = (
426
+ f"<div style='font-size: 24px; font-weight: bold; color: #2E86C1;'>"
427
+ f"Welcome {Nickname}! 👋</div>"
428
  f"<div style='font-size: 20px;'>"
429
  f"{welcome_text}"
430
  f"</div>"
431
  )
432
+
433
  def get_welcome_message(self):
434
  return self.welcome_message
435
+
436
  def add_to_history(self, role, message):
437
  """Add a message to the conversation history"""
438
  self.conversation_history.append({"role": role, "content": message})
439
+
440
  def get_conversation_history(self):
441
  """Get the full conversation history"""
442
  return self.conversation_history
443
+
444
  def get_formatted_history(self):
445
  """Get conversation history formatted as a string for the LLM"""
446
  formatted_history = ""
 
449
  formatted_history += f"{role}: {entry['content']}\n\n"
450
  return formatted_history
451
 
452
+ api_key =api
453
+ llm_instance = OpenRouterLLM(key=api_key)
454
+ #llm_instance = model
455
  user_session = UserSession(llm_instance)
456
 
457
+
458
  def collect_user_info(Nickname):
459
  if not Nickname:
460
  return "Nickname is required to proceed.", gr.update(visible=False), gr.update(visible=True), []
 
513
 
514
  return stream_func
515
 
516
+ # def rag_memory_stream(message, history):
517
+ # # Add user message to history
518
+ # user_session.add_to_history("user", message)
519
 
520
+ # # Initialize with empty response
521
+ # partial_text = ""
522
+ # full_response = ""
523
+
524
+ # # Use the rag_chain with the question
525
+ # for new_text in rag_chain({"question": message}):
526
+ # partial_text += new_text
527
+ # full_response = partial_text
528
+ # yield partial_text
529
 
530
+ # # After generating the complete response, add it to history
531
+ # user_session.add_to_history("assistant", full_response)
532
 
533
 
534
def rag_memory_stream(message, history, user_lang="kin_Latn", system_lang="eng_Latn"):
    """Answer one chat turn, bridging the user's language and English.

    The incoming *message* (Kinyarwanda by default) is translated into
    English, fed through the RAG chain, and the complete answer is
    translated back into the user's language before being yielded once.
    Conversation history is stored in English on both sides.
    """
    # Work in English internally: the RAG chain and the stored history
    # both expect it.
    english_message = translate_text(message, user_lang, system_lang)
    user_session.add_to_history("user", english_message)

    # Collect every streamed chunk first — translation needs the whole
    # answer, so there is nothing useful to yield incrementally.
    chunks = []
    for chunk in rag_chain({"question": english_message}):
        chunks.append(chunk)
    full_response = "".join(chunks)

    # Record the English answer, then hand back the user-language version.
    user_session.add_to_history("assistant", full_response)
    yield translate_text(full_response, system_lang, user_lang)
550
+
551
+
552
+
553
+ import gradio as gr
554
 
 
 
 
555
 
556
+ api_key = api
 
 
 
 
 
 
557
 
558
+ def chatbot_interface():
559
+ api_key = api
560
+
561
+ global template
 
562
 
 
 
 
563
  template = """
564
  You are a compassionate and supportive AI assistant specializing in helping individuals affected by Gender-Based Violence (GBV). Your primary goal is to provide emotionally intelligent support while maintaining appropriate boundaries.
565
 
 
606
  Your response should balance emotional support with practical guidance, always centered on {first_name}'s expressed needs and current emotional state.
607
  """
608
 
609
+
 
 
 
610
  global rag_chain
611
  rag_chain = create_rag_chain(retriever, template, api_key)
612