File size: 4,196 Bytes
b4ceb72
0c80716
b4ceb72
 
 
7613467
c3599b6
b4ceb72
bd4819a
 
 
 
 
7613467
c3599b6
b4ceb72
85bca36
23a30f1
85bca36
23a30f1
7613467
c3599b6
bd4819a
b4ceb72
 
c3599b6
 
 
 
 
b4ceb72
 
7613467
c3599b6
951ef0a
0305332
951ef0a
 
 
 
 
 
 
 
 
 
 
0305332
951ef0a
0305332
 
951ef0a
bd4819a
0305332
 
951ef0a
 
0305332
951ef0a
b4ceb72
0305332
c3599b6
0305332
951ef0a
b4ceb72
0305332
c3599b6
 
0305332
c3599b6
0305332
c3599b6
 
0305332
d99d01c
0305332
d99d01c
 
0305332
 
d99d01c
 
0305332
 
d99d01c
 
0305332
 
d99d01c
 
0305332
951ef0a
dc0224a
bd5560a
dc0224a
 
0305332
951ef0a
 
 
 
 
 
7613467
b4ceb72
c3599b6
b4ceb72
29f9dea
951ef0a
29f9dea
 
c3599b6
29f9dea
bd5560a
c3599b6
bd5560a
951ef0a
bd5560a
951ef0a
bd5560a
951ef0a
 
 
 
bd5560a
e672180
bd5560a
 
 
 
 
 
 
c3599b6
bd4819a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
import os
import gradio as gr
from huggingface_hub import login
from transformers import AutoModelForSeq2SeqLM, T5Tokenizer
from peft import PeftModel, PeftConfig

# Hugging Face Hub authentication: read the access token from the
# environment and log in only when one is actually present.
token = os.environ.get("token")
if not token:
    print("Token not found. Please set your token in the environment variables.")
else:
    login(token)
    print("Login is successful")

# Model and tokenizer setup: load the base FLAN-T5 checkpoint and stack the
# fine-tuned PEFT adapter weights ("Komal-patra/results") on top of it.
MODEL_NAME = "google/flan-t5-base"

# `use_auth_token=` is deprecated (and removed in recent transformers
# releases) in favour of `token=`; keep forwarding the Hub token so
# gated/private repos still resolve.
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME, token=token)

# NOTE(review): `config` is never used after this line; the call is kept
# because it also validates that the adapter repo is reachable — confirm
# whether it can be dropped.
config = PeftConfig.from_pretrained("Komal-patra/results")
base_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
model = PeftModel.from_pretrained(base_model, "Komal-patra/results")

# Text generation function
def generate_text(prompt, max_length=150):
    """Generate a reply for *prompt* with the PEFT-adapted FLAN-T5 model.

    Args:
        prompt: Input text fed to the tokenizer.
        max_length: Maximum number of tokens in the generated sequence.

    Returns:
        The decoded model output with special tokens stripped.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        # Pass the attention mask explicitly: omitting it makes generate()
        # warn and can mis-handle padding tokens in the input.
        attention_mask=inputs["attention_mask"],
        max_length=max_length,
        num_beams=1,
        repetition_penalty=2.2,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text

# Custom CSS for the UI.
# NOTE(review): this is a server-side filesystem path, but it is interpolated
# into a CSS url() that the *browser* must fetch — a local /mnt/data path is
# not served by Gradio, so the background image likely never loads. Confirm
# and serve the file via Gradio's allowed-paths / file= mechanism instead.
background_image_path = '/mnt/data/image.png'
# f-string: literal CSS braces are escaped as {{ }}; only
# {background_image_path} is substituted.
custom_css = f"""
/* Chatbot container */
.gradio-container {{
    color: white; /* Light text color */
    background-image: url('{background_image_path}'); /* Add background image */
    background-size: cover; /* Cover the entire container */
    background-position: center; /* Center the image */
    background-repeat: no-repeat; /* Do not repeat the image */
    background-color: rgba(0, 0, 0, 0.5); /* Semi-transparent dark background */
}}

/* Transparent message bubbles */
.message.pending {{
    background: rgba(168, 196, 214, 0.1);
}}
.message.bot.svelte-1s78gfg.message-bubble-border {{
    border-color: rgba(38, 107, 153, 0.1);
    background: rgba(255, 255, 255, 0.1);
}}
.message.user.svelte-1s78gfg.message-bubble-border {{
    background: rgba(157, 221, 249, 0.1);
    border-color: rgba(157, 221, 249, 0.1);
}}   

/* For both user and response message as per the document */
span.md.svelte-8tpqd2.chatbot.prose p {{
    color: #266B99;
}}

/* RED (Hex: #DB1616) for action buttons and links only */
.clear-btn {{
    background: #DB1616;
    color: white;
}}
/* Primary colors are set to be used for all sorts */
.submit-btn {{
    background: #266B99;
    color: white;
}}
/* Add icons to messages */
.message.user.svelte-1s78gfg {{
    display: flex;
    align-items: center;
}}
.message.user.svelte-1s78gfg:before {{
    content: url('file=Komal-patra/EU_AI_ACT/user icon.jpeg');
    margin-right: 8px;
}}
.message.bot.svelte-1s78gfg {{
    display: flex;
    align-items: center;
}}
.message.bot.svelte-1s78gfg:before {{
    content: url('file=Komal-patra/EU_AI_ACT/orcawise image.png');
    margin-right: 8px;
}}

/* Enable scrolling for the chatbot messages */
.chatbot .messages {{
    max-height: 500px;  /* Adjust as needed */
    overflow-y: auto;
}}

/* Transparent overall message container */
.gradio-chatbot .wrap {{
    background: transparent; /* Make the container background transparent */
    border: none; /* Remove any borders if necessary */
}}
"""

# Gradio interface setup: chat window, input box, and submit/clear buttons
# wired to the user/bot callback pair below.
with gr.Blocks(css=custom_css) as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Ask your question...", show_label=False)
    submit_button = gr.Button("Submit", elem_classes="submit-btn")
    clear = gr.Button("Clear", elem_classes="clear-btn")

    def user(user_message, history):
        """Append the user's message (reply pending) and clear the textbox."""
        return "", history + [[user_message, None]]

    def bot(history):
        """Fill in the bot reply for the most recent user message.

        The very first message always receives a fixed greeting; later
        messages are answered by the fine-tuned model.
        """
        if len(history) == 1:
            # NOTE(review): this greets instead of answering the user's
            # first real question — confirm that is intended onboarding
            # behaviour rather than a bug.
            history[-1][1] = "Hi there! How can I help you today?"
        else:
            # The original code first assigned "" and then immediately
            # overwrote it; the dead assignment has been removed.
            history[-1][1] = generate_text(history[-1][0])
        return history

    submit_button.click(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()