import os
import gradio as gr
from huggingface_hub import login
from transformers import AutoModelForSeq2SeqLM, T5Tokenizer
from peft import PeftModel, PeftConfig
# Hugging Face login
token = os.environ.get("token")
login(token)
print("login is successful")
max_length = 512
# Model and tokenizer setup
MODEL_NAME = "google/flan-t5-base"
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME, use_auth_token=token)
config = PeftConfig.from_pretrained("Komal-patra/results")
base_model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_NAME)
model = PeftModel.from_pretrained(base_model, "Komal-patra/results")
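# Optional sanity check (commented out; not part of the app flow): PeftModel exposes the
# loaded adapter configuration, which can be printed to confirm the LoRA weights attached.
# print(model.peft_config)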
# Text generation function
def generate_text(prompt, max_length=512):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        input_ids=inputs["input_ids"],
        max_length=max_length,
        num_beams=1,
        repetition_penalty=2.2,
    )
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
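# Example usage (commented out so the Space only serves the Gradio app; the question below
# is illustrative, not taken from the project's data):
# print(generate_text("What obligations does the EU AI Act place on high-risk AI systems?"))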
# Custom CSS for the UI
background_image_path = 'https://www.shlegal-technology.com/sites/default/files/insight/ExploringTheLegislativeBackgroundBANNER.jpg'
custom_css = f"""
.message.pending {{
    background: #A8C4D6;
}}
/* Response message */
.message.bot.svelte-1s78gfg.message-bubble-border {{
    border-color: #266B99;
}}
/* User message */
.message.user.svelte-1s78gfg.message-bubble-border {{
    background: #9DDDF9;
    border-color: #9DDDF9;
}}
/* Text color for both user and response messages, as per the document */
span.md.svelte-8tpqd2.chatbot.prose p {{
    color: #266B99;
}}
/* Chatbot container */
.gradio-container {{
    background: #1c1c1c; /* Dark background */
    color: white; /* Light text color */
    background-image: url('{background_image_path}'); /* Add background image */
    background-size: cover; /* Cover the entire container */
    background-position: center; /* Center the image */
    background-repeat: no-repeat; /* Do not repeat the image */
}}
/* Red (#DB1616) is reserved for action buttons and links */
.clear-btn {{
    background: #DB1616;
    color: white;
}}
/* Primary blue (#266B99) for the submit button */
.submit-btn {{
    background: #266B99;
    color: white;
}}
/* Add icons to messages */
.message.user.svelte-1s78gfg {{
    display: flex;
    align-items: center;
}}
.message.user.svelte-1s78gfg:before {{
    content: url('file=Komal-patra/EU_AI_ACT/user icon.jpeg');
    margin-right: 8px;
}}
.message.bot.svelte-1s78gfg {{
    display: flex;
    align-items: center;
}}
.message.bot.svelte-1s78gfg:before {{
    content: url('file=Komal-patra/EU_AI_ACT/orcawise image.png');
    margin-right: 8px;
}}
/* Enable scrolling for the chatbot messages */
.chatbot.messages {{
    max-height: 500px; /* Adjust as needed */
    overflow-y: auto;
}}
/* Add transparency to the chat box */
.chatbot {{
    background-color: rgba(255, 255, 255, 0.5); /* 50% transparent white background */
    border: none;
    box-shadow: none;
}}
"""
# Gradio interface setup
with gr.Blocks(css=custom_css) as demo:
    gr.Markdown("<h1>Ask a question about the EU AI Act</h1>")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(placeholder="Ask your question...", show_label=False)  # Add placeholder text
    submit_button = gr.Button("Submit", elem_classes="submit-btn")
    clear = gr.Button("Clear", elem_classes="clear-btn")
    # Handle user input: generate a response and append the (question, answer) pair to the chat history
    def user(user_message, history):
        response = generate_text(user_message)
        # Return the updated history for the chatbot and an empty string to clear the textbox
        return history + [(user_message, response)], ""
    # Event listener for the submit button
    submit_button.click(fn=user, inputs=[msg, chatbot], outputs=[chatbot, msg])
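    # Optionally, the same handler could also be bound to the Enter key (illustrative, not enabled here):
    # msg.submit(fn=user, inputs=[msg, chatbot], outputs=[chatbot, msg])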
    # Event listener for the clear button (clears the input textbox only)
    clear.click(fn=lambda: "", inputs=None, outputs=msg)
demo.launch()