import os
import PyPDF2
from PyPDF2 import PdfReader

# Embedding model used to vectorize the knowledge base
from langchain_huggingface import HuggingFaceEmbeddings
embed_model = HuggingFaceEmbeddings(model_name="mixedbread-ai/mxbai-embed-large-v1")

import pandas as pd

folder_path = "./"
context_data = []

# List all files in the folder
files = os.listdir(folder_path)

# Get list of CSV and Excel files
data_files = [f for f in files if f.endswith(('.csv', '.xlsx', '.xls'))]

# Process each file
for index, file in enumerate(data_files, 1):
    print(f"\nProcessing file {index}: {file}")
    file_path = os.path.join(folder_path, file)

    try:
        # Read the file based on its extension
        if file.endswith('.csv'):
            df = pd.read_csv(file_path)
        else:
            df = pd.read_excel(file_path)

        # Extract non-empty values from the third column (index 2) and append them
        context_data.extend(df.iloc[:, 2].dropna().astype(str).tolist())

    except Exception as e:
        print(f"Error processing file {file}: {str(e)}")





# def extract_text_from_pdf(pdf_path):
#     """Extracts text from a PDF file."""
#     try:
#         with open(pdf_path, "rb") as file:
#             reader = PyPDF2.PdfReader(file)
#             text = "".join(page.extract_text() or "" for page in reader.pages)  # Handle None cases
#             return text
#     except Exception as e:
#         print(f"Error extracting text from {pdf_path}: {e}")
#         return ""

# folder_path = "./"
# # Initialize the list to hold the extracted text chunks
# text_chunks = []

# # Get all PDF filenames in the folder
# filenames = [f for f in os.listdir(folder_path) if f.lower().endswith(".pdf")]

# # Process each PDF file
# for index, file in enumerate(filenames, 1):
#     print(f"\nProcessing file {index}: {file}")
#     pdf_path = os.path.join(folder_path, file)

#     try:
#         # Extract text from the PDF
#         extracted_text = extract_text_from_pdf(pdf_path)

#         if extracted_text.strip():  # Ensure extracted text is not just whitespace
#             # Split extracted text into chunks of 2000 characters
#             chunks = [extracted_text[i:i+2000] for i in range(0, len(extracted_text), 2000)]

#             # Append extracted chunks to the list
#             text_chunks.extend(chunks)
#         else:
#             print(f"No text found in the PDF: {file}")

#     except Exception as e:
#         print(f"Error reading the PDF {file}: {e}")



from urllib.parse import urljoin, urlparse
import requests
from io import BytesIO

from bs4 import BeautifulSoup
from langchain_core.prompts import ChatPromptTemplate
import gradio as gr


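# Crawl each base URL plus its internal links (one level deep), returning a
# {url: cleaned_text} mapping; PDF links are routed through extract_pdf_text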
def scrape_websites(base_urls):
    try:
        visited_links = set()  # To avoid revisiting the same link
        content_by_url = {}  # Store content from each URL

        for base_url in base_urls:
            if not base_url.strip():
                continue  # Skip empty or invalid URLs

            print(f"Scraping base URL: {base_url}")
            html_content = fetch_page_content(base_url)
            if html_content:
                cleaned_content = clean_body_content(html_content)
                content_by_url[base_url] = cleaned_content
                visited_links.add(base_url)

                # Extract and process all internal links
                soup = BeautifulSoup(html_content, "html.parser")
                links = extract_internal_links(base_url, soup)

                for link in links:
                    if link not in visited_links:
                        print(f"Scraping link: {link}")

                        # PDF links need binary handling, so branch before
                        # fetching them as HTML text
                        if link.lower().endswith('.pdf'):
                            print(f"Extracting PDF content from: {link}")
                            pdf_content = extract_pdf_text(link)
                            if pdf_content:
                                content_by_url[link] = pdf_content
                                visited_links.add(link)
                        else:
                            page_content = fetch_page_content(link)
                            if page_content:
                                cleaned_content = clean_body_content(page_content)
                                content_by_url[link] = cleaned_content
                                visited_links.add(link)

        return content_by_url

    except Exception as e:
        print(f"Error during scraping: {e}")
        return {}


def fetch_page_content(url):
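    """Fetch raw HTML for a URL, returning None on any request error."""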
    try:
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.exceptions.RequestException as e:
        print(f"Error fetching {url}: {e}")
        return None


def extract_internal_links(base_url, soup):
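    """Collect all links on the page that resolve to the same domain as base_url."""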
    links = set()
    for anchor in soup.find_all("a", href=True):
        href = anchor["href"]
        full_url = urljoin(base_url, href)
        if is_internal_link(base_url, full_url):
            links.add(full_url)
    return links


def is_internal_link(base_url, link_url):
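    """Return True when link_url shares its network location (domain) with base_url."""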
    base_netloc = urlparse(base_url).netloc
    link_netloc = urlparse(link_url).netloc
    return base_netloc == link_netloc


def extract_pdf_text(pdf_url):
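    """Download a PDF and return its concatenated page text, or None on failure."""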
    try:
        response = requests.get(pdf_url)
        response.raise_for_status()

        # Open the PDF from the response content
        with BytesIO(response.content) as file:
            reader = PdfReader(file)
            pdf_text = ""
            for page in reader.pages:
                # extract_text() can return None for image-only pages
                pdf_text += page.extract_text() or ""

        return pdf_text if pdf_text else None
    except requests.exceptions.RequestException as e:
        print(f"Error fetching PDF {pdf_url}: {e}")
        return None
    except Exception as e:
        print(f"Error reading PDF {pdf_url}: {e}")
        return None


def clean_body_content(html_content):
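    """Strip scripts and styles from HTML and return the visible text, one non-empty line per fragment."""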
    soup = BeautifulSoup(html_content, "html.parser")

    # Remove scripts and styles
    for script_or_style in soup(["script", "style"]):
        script_or_style.extract()

    # Get text and clean up
    cleaned_content = soup.get_text(separator="\n")
    cleaned_content = "\n".join(
        line.strip() for line in cleaned_content.splitlines() if line.strip()
    )
    return cleaned_content



# if __name__ == "__main__":
#     website = [
#                #"https://www.rib.gov.rw/index.php?id=371",
#                "https://haguruka.org.rw/our-work/"
#                ]
#     all_content = scrape_websites(website)

#     # Temporary list to store (url, content) tuples
#     temp_list = []

#     # Process and store each URL with its content
#     for url, content in all_content.items():
#         temp_list.append((url, content)) 



# processed_texts = []

# # Process each element in the temporary list
# for element in temp_list:
#     if isinstance(element, tuple):
#         url, content = element  # Unpack the tuple
#         processed_texts.append(f"url: {url}, content: {content}")
#     elif isinstance(element, str):
#         processed_texts.append(element)
#     else:
#         processed_texts.append(str(element))

# def chunk_string(s, chunk_size=2000):
#     return [s[i:i+chunk_size] for i in range(0, len(s), chunk_size)]

# # List to store the chunks
# chunked_texts = []

# for text in processed_texts:
#   chunked_texts.extend(chunk_string(text))

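# Assemble the final corpus; the PDF and web-scraping pipelines above are
# currently disabled, so only the spreadsheet rows are indexed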
data = []
data.extend(context_data)
# data.extend([item for item in text_chunks if item not in data])
# data.extend([item for item in chunked_texts if item not in data])



#from langchain_community.vectorstores import Chroma
from langchain_chroma import Chroma



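# Build the vector store over the corpus. Note: without a persist_directory
# this Chroma collection is in-memory only and is rebuilt on every start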
vectorstore = Chroma(
    collection_name="GBV_set", 
    embedding_function=embed_model,
)

# Quick sanity check on the (still empty) collection's fields
vectorstore.get().keys()

# Add the collected documents to the vector store
vectorstore.add_texts(data)


# OpenRouter API key, read from the V1 environment variable
api = os.environ.get('V1')



 

from openai import OpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from typing import Iterator
import time



# Prompt template for the GBV support chatbot
template = ("""
You are a compassionate and supportive AI assistant specializing in helping individuals affected by Gender-Based Violence (GBV). Your primary goal is to provide emotionally intelligent support while maintaining appropriate boundaries.

When responding to {first_name}, follow these guidelines:

1. **Emotional Intelligence**
   - Validate feelings without judgment (e.g., "It is completely understandable to feel this way")
   - Offer reassurance when appropriate, always centered on empowerment
   - Adjust your tone based on the emotional state conveyed

2. **Personalized Communication**
   - Avoid contractions (e.g., use I am instead of I'm)
   - Incorporate thoughtful pauses or reflective questions when the conversation involves difficult topics
   - Use selective emojis (😊, πŸ€—, ❀️) only when tone-appropriate and not during crisis discussions
   - Balance warmth with professionalism

3. **Conversation Management**
   - Refer to {conversation_history} to maintain continuity and avoid repetition
   - Keep responses concise unless greater detail is explicitly requested
   - Use clear paragraph breaks for readability
   - Prioritize immediate concerns before addressing secondary issues

4. **Information Delivery**
   - Extract only relevant information from {context} that directly addresses the question
   - Present information in accessible, non-technical language
   - Organize resource recommendations in order of relevance and accessibility
   - Provide links [URL] only when specifically requested, prefaced with clear descriptions
   - When information is unavailable, respond with: "I do not have that specific information right now, {first_name}. Would it be helpful if I focused on [alternative support option]?"

5. **Safety and Ethics**
   - Prioritize user safety in all responses
   - Never generate speculative content about their specific situation
   - Avoid phrases that could minimize experiences or create pressure
   - Include gentle reminders about professional help when discussing serious issues

Your response should balance emotional support with practical guidance, always centered on {first_name}'s expressed needs and current emotional state.

    **Context:** {context}
    **User's Question:** {question}
    **Your Response:**
""")

rag_prompt = PromptTemplate.from_template(template)

retriever = vectorstore.as_retriever()


API_TOKEN = os.environ.get('TOKEN')

model_name = "facebook/nllb-200-distilled-600M"

url = f"https://api-inference.huggingface.co/models/{model_name}"

headers = {
    "Authorization": f"Bearer {API_TOKEN}"
}

def translate_text(text, src_lang, tgt_lang):
    """Translate text using Hugging Face API"""
    response = requests.post(
        url,
        headers=headers,
        json={
            "inputs": text,
            "parameters": {
                "src_lang": src_lang,
                "tgt_lang": tgt_lang
            }
        }
    )

    if response.status_code == 200:
        result = response.json()
        if isinstance(result, list) and len(result) > 0:
            return result[0]['translation_text']
        return result['translation_text']
    else:
        print(f"Translation error: {response.status_code}, {response.text}")
        return text  # Return original text if translation fails
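
# Example call (hypothetical values, assuming TOKEN is set and the Hugging Face
# inference endpoint for the model is warm):
#   translate_text("Hello", "eng_Latn", "kin_Latn")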


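# Thin wrapper around the OpenAI SDK pointed at OpenRouter's OpenAI-compatible
# endpoint; stream() yields content tokens as they arrive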
class OpenRouterLLM:
    def __init__(self, key: str):
        try:
            self.client = OpenAI(
                base_url="https://openrouter.ai/api/v1",
                api_key=key 
            )
            self.headers = {
                "HTTP-Referer": "http://localhost:3000",
                "X-Title": "Local Development"
            }
        except Exception as e:
            print(f"Initialization error: {e}")
            raise
    
    def stream(self, prompt: str) -> Iterator[str]:
        try:
            completion = self.client.chat.completions.create(
                #model="deepseek/deepseek-r1-distill-llama-70b:free",
                model="meta-llama/llama-3.3-70b-instruct:free",
                messages=[{"role": "user", "content": prompt}],
                extra_headers=self.headers,  # attach the OpenRouter attribution headers
                stream=True
            )
            
            for chunk in completion:
                delta = chunk.choices[0].delta
                if hasattr(delta, "content") and delta.content:
                    yield delta.content
        except Exception as e:
            yield f"Streaming error: {str(e)}"


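# Per-process session state: the registered user, their generated welcome
# message, and the running conversation history that is fed back into the prompt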
class UserSession:
    def __init__(self, llm: OpenRouterLLM):  # Accept an instance of OpenRouterLLM
        self.current_user = None
        self.welcome_message = None
        self.conversation_history = []  # Add conversation history storage
        self.llm = llm  # Store the LLM instance

    def set_user(self, user_info):
        self.current_user = user_info
        self.set_welcome_message(user_info.get("Nickname", "Guest"))
        # Initialize conversation history with welcome message
        welcome = self.get_welcome_message()
        self.conversation_history = [
            {"role": "assistant", "content": welcome},
        ]

    def get_user(self):
        return self.current_user

    def set_welcome_message(self, Nickname, src_lang="eng_Latn", tgt_lang="kin_Latn"):
        """Set a dynamic welcome message using the OpenRouterLLM."""
        prompt = (
            f"Create a very brief welcome message for {Nickname} that fits in 3 lines. "
            f"The message should: "
            f"1. Welcome {Nickname} warmly and professionally. "
            f"2. Emphasize that this is a safe and trusted space. "
            f"3. Highlight specialized support for gender-based violence (GBV) and legal assistance. "
            f"4. Use a tone that is warm, reassuring, and professional. "
            f"5. Keep the message concise and impactful, ensuring it fits within the character limit."
        )

        # Use the OpenRouterLLM to generate the message
        welcome = "".join(self.llm.stream(prompt))  # Stream and concatenate the response
        welcome_text = translate_text(welcome, src_lang, tgt_lang)

        # Format the message with HTML styling
        self.welcome_message = (
            f"<div style='font-size: 24px; font-weight: bold; color: #2E86C1;'>"
            f"Welcome {Nickname}! πŸ‘‹</div>"
            f"<div style='font-size: 20px;'>"
            f"{welcome_text}"
            f"</div>"
        )

    def get_welcome_message(self):
        return self.welcome_message

    def add_to_history(self, role, message):
        """Add a message to the conversation history"""
        self.conversation_history.append({"role": role, "content": message})

    def get_conversation_history(self):
        """Get the full conversation history"""
        return self.conversation_history

    def get_formatted_history(self):
        """Get conversation history formatted as a string for the LLM"""
        formatted_history = ""
        for entry in self.conversation_history:
            role = "User" if entry["role"] == "user" else "Assistant"
            formatted_history += f"{role}: {entry['content']}\n\n"
        return formatted_history

api_key = api
llm_instance = OpenRouterLLM(key=api_key)
#llm_instance = model
user_session = UserSession(llm_instance)


def collect_user_info(Nickname):
    if not Nickname:
        return "Nickname is required to proceed.", gr.update(visible=False), gr.update(visible=True), []

    # Store user info for chat session
    user_info = {
        "Nickname": Nickname,
        "timestamp": time.strftime("%Y-%m-%d %H:%M:%S")
    }

    # Set user in session
    user_session.set_user(user_info)

    # Generate welcome message
    welcome_message = user_session.get_welcome_message()

    # Add initial message to start the conversation
    chat_history = add_initial_message([(None, welcome_message)])

    # Return welcome message and update UI
    return welcome_message, gr.update(visible=True), gr.update(visible=False), chat_history

# Placeholder hook for seeding the chat; the welcome message is already added
# in collect_user_info, so the history is returned unchanged
def add_initial_message(chatbot):
    return chatbot

# Create RAG chain with user context and conversation history
def create_rag_chain(retriever, template, api_key):
    llm = OpenRouterLLM(api_key)
    rag_prompt = PromptTemplate.from_template(template)

    def stream_func(input_dict):
        # Get context using the retriever's invoke method
        context = retriever.invoke(input_dict["question"])
        context_str = "\n".join([doc.page_content for doc in context])

        # Get user info from the session
        user_info = user_session.get_user() or {}
        first_name = user_info.get("Nickname", "User")
        
        # Get conversation history
        conversation_history = user_session.get_formatted_history()

        # Format prompt with user context and conversation history
        prompt = rag_prompt.format(
            context=context_str,
            question=input_dict["question"],
            first_name=first_name,
            conversation_history=conversation_history
        )

        # Stream response
        return llm.stream(prompt)

    return stream_func
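
# Usage, as wired up in chatbot_interface() below:
#   rag_chain = create_rag_chain(retriever, template, api_key)
#   for token in rag_chain({"question": "..."}):
#       ...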

# def rag_memory_stream(message, history):
#     # Add user message to history
#     user_session.add_to_history("user", message)
    
#     # Initialize with empty response
#     partial_text = ""
#     full_response = ""

#     # Use the rag_chain with the question
#     for new_text in rag_chain({"question": message}):
#         partial_text += new_text
#         full_response = partial_text
#         yield partial_text
    
#     # After generating the complete response, add it to history
#     user_session.add_to_history("assistant", full_response)


def rag_memory_stream(message, history, user_lang="kin_Latn", system_lang="eng_Latn"):
    # Translate the user's message into English before retrieval and generation
    english_message = translate_text(message, user_lang, system_lang)

    user_session.add_to_history("user", english_message)

    # Accumulate the complete English response; because the reply has to be
    # translated back as a whole, it is yielded once rather than token by token
    full_response = ""
    for new_text in rag_chain({"question": english_message}):
        full_response += new_text

    translated_response = translate_text(full_response, system_lang, user_lang)

    user_session.add_to_history("assistant", full_response)

    yield translated_response





api_key = api

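# Build the two-stage Gradio UI: a registration screen that collects a
# nickname, then the chat screen backed by rag_memory_stream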
def chatbot_interface():
    api_key = api
 
    global template

    template = """
    You are a compassionate and supportive AI assistant specializing in helping individuals affected by Gender-Based Violence (GBV). Your primary goal is to provide emotionally intelligent support while maintaining appropriate boundaries.
    
    **Previous conversation:**
    {conversation_history}
    
    **Context information:**
    {context}
    
    **User's Question:** {question}
    
    When responding to {first_name}, follow these guidelines:
    
    1. **Emotional Intelligence**
       - Validate feelings without judgment (e.g., "It is completely understandable to feel this way")
       - Offer reassurance when appropriate, always centered on empowerment
       - Adjust your tone based on the emotional state conveyed
    
    2. **Personalized Communication**
       - Avoid contractions (e.g., use I am instead of I'm)
       - Incorporate thoughtful pauses or reflective questions when the conversation involves difficult topics
       - Use selective emojis (😊, πŸ€—, ❀️) only when tone-appropriate and not during crisis discussions
       - Balance warmth with professionalism
    
    3. **Conversation Management**
       - Refer to {conversation_history} to maintain continuity and avoid repetition
       - Keep responses concise unless greater detail is explicitly requested
       - Use clear paragraph breaks for readability
       - Prioritize immediate concerns before addressing secondary issues
    
    4. **Information Delivery**
       - Extract only relevant information from {context} that directly addresses the question
       - Present information in accessible, non-technical language
       - Organize resource recommendations in order of relevance and accessibility
       - Provide links only when specifically requested, prefaced with clear descriptions
       - When information is unavailable, respond with: "I do not have that specific information right now, {first_name}. Would it be helpful if I focused on [alternative support option]?"
    
    5. **Safety and Ethics**
       - Prioritize user safety in all responses
       - Never generate speculative content about their specific situation
       - Avoid phrases that could minimize experiences or create pressure
       - Include gentle reminders about professional help when discussing serious issues
    
    Your response should balance emotional support with practical guidance, always centered on {first_name}'s expressed needs and current emotional state.
    """


    global rag_chain
    rag_chain = create_rag_chain(retriever, template, api_key)

    with gr.Blocks() as demo:
        # User registration section
        with gr.Column(visible=True, elem_id="registration_container") as registration_container:
            gr.Markdown("### Your privacy matters to us! Just share a nickname you feel comfortable with to start chatting.")

            with gr.Row():
                first_name = gr.Textbox(
                    label="Nickname",
                    placeholder="Enter a nickname you feel comfortable with",
                    scale=1,
                    elem_id="input_nickname"
                )

            with gr.Row():
                submit_btn = gr.Button("Start Chatting", variant="primary", scale=2)

            response_message = gr.Markdown()

        # Chatbot section (initially hidden)
        with gr.Column(visible=False, elem_id="chatbot_container") as chatbot_container:
            chat_interface = gr.ChatInterface(
                fn=rag_memory_stream,
                title="Chat with GBVR",
                fill_height=True
            )

            # Footer with version info
            gr.Markdown("Ijwi ry'Ubufasha Chatbot v1.0.0 Β© 2025")

        # Handle user registration
        submit_btn.click(
            collect_user_info,
            inputs=[first_name],
            outputs=[response_message, chatbot_container, registration_container, chat_interface.chatbot]
        )

    demo.css = """
    :root {
        --background: #f0f0f0;
        --text: #000000;
    }

    body, .gradio-container {
        margin: 0;
        padding: 0;
        width: 100vw;
        height: 100vh;
        display: flex;
        flex-direction: column;
        justify-content: center;
        align-items: center;
        background: var(--background);
        color: var(--text);
    }

    .gradio-container {
        max-width: 100%;
        max-height: 100%;
    }

    .gr-box {
        background: var(--background);
        color: var(--text);
        border-radius: 12px;
        padding: 2rem;
        border: 1px solid rgba(0, 0, 0, 0.1);
        box-shadow: 0 4px 6px rgba(0, 0, 0, 0.05);
    }

    .gr-button-primary {
        background: var(--background);
        color: var(--text);
        padding: 12px 24px;
        border-radius: 8px;
        transition: all 0.3s ease;
        border: 1px solid rgba(0, 0, 0, 0.1);
    }

    .gr-button-primary:hover {
        transform: translateY(-1px);
        box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2);
    }

    footer {
        text-align: center;
        color: var(--text);
        opacity: 0.7;
        padding: 1rem;
        font-size: 0.9em;
    }

    .gr-markdown h3 {
        color: var(--text);
        margin-bottom: 1rem;
    }

    .registration-markdown, .chat-title h1 {
        color: var(--text);
    }
    """
    
    return demo

# Launch the interface
if __name__ == "__main__":
    chatbot_interface().launch(share=True)