# Importing Necessary Libraries
import gradio as gr
from llama_index import download_loader, ServiceContext, VectorStoreIndex
from llama_index.embeddings import HuggingFaceEmbedding
from llama_index import Prompt
import torch
device = torch.device("cpu")
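# NOTE: this `device` handle is not passed to the models below; the LLM stays on CPU here
# because n_gpu_layers is set to 0 in the LlamaCPP configuration.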
# Loading the Zephyr Model using Llama CPP
from llama_index.llms import LlamaCPP
llm = LlamaCPP(
    model_url='https://huggingface.co/TheBloke/zephyr-7B-beta-GGUF/resolve/main/zephyr-7b-beta.Q5_K_M.gguf?download=true',
    model_path=None,
    temperature=0.5,
    max_new_tokens=2000,
    context_window=3900,
    # set to at least 1 to use GPU
    model_kwargs={"n_gpu_layers": 0},
    verbose=True,
)
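# NOTE: with model_path=None, LlamaCPP should download the GGUF weights from model_url on the
# first run and cache them locally; increase n_gpu_layers to offload layers to a GPU if available.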
# Loading Embedding Model
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-base-en-v1.5")
# Defining custom Prompt
TEMPLATE_STR = (
    '''You are a helpful and responsible AI assistant who is excited to help the user and answer questions politely, but will never harm humans or engage in any activity that causes harm to anyone. Use the context given below if it is useful.
{context_str}
<|user|>
{query_str}
<|assistant|>
'''
)
QA_TEMPLATE = Prompt(TEMPLATE_STR)
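# At query time, the retrieved spreadsheet text is substituted for {context_str}
# and the user's question for {query_str}.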
# User Interface functions
def build_the_bot(file):
    global service_context, index
    if file is not None and file.name.endswith(".xlsx"):
        # Loading Data
        PandasExcelReader = download_loader("PandasExcelReader")
        loader = PandasExcelReader(pandas_config={"header": 0})
        documents = loader.load_data(file=file)
        # Chunk the sheet text, embed it with bge-base-en, and index it for retrieval
        service_context = ServiceContext.from_defaults(
            chunk_size=150, chunk_overlap=10,
            llm=llm, embed_model=embed_model,
        )
        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
        # Four gr.update values because upload.change has four outputs
        return (gr.update(visible=True), gr.update(visible=True), gr.update(visible=True), gr.update(visible=True))
    else:
        # Ask for a valid file if anything other than an .xlsx was uploaded
        return (gr.Textbox(placeholder="Please upload an Excel (.xlsx) file; refresh the page to restart the app"), gr.update(visible=True), gr.update(visible=False), gr.update(visible=True))
def chat(user_input, history):
    if user_input == "":
        return "Please type a query so that I can assist you.", history
    else:
        global service_context, index
        # Apply the custom QA prompt when answering from the index
        query_engine = index.as_query_engine(streaming=False, text_qa_template=QA_TEMPLATE)
        bot_response = query_engine.query(user_input)
        bot_response = str(bot_response)
        history.append((user_input, bot_response))
        return "", history
def clear_everything():
    return (None, None, None)
"""# Adding themes in UI Interface
custom_theme = gr.themes.Monochrome()"""
# CSS
colors = ["#64A087", "green", "black"]
CSS = """
#question input {
font-size: 16px;
}
#app-title {
width: 100%;
margin: auto;
}
#url-textbox {
padding: 0 !important;
}
#short-upload-box .w-full {
min-height: 10rem !important;
}
#select-a-file {
display: block;
width: 100%;
}
#file-clear {
padding-top: 2px !important;
padding-bottom: 2px !important;
padding-left: 8px !important;
padding-right: 8px !important;
margin-top: 10px;
}
.gradio-container .gr-button-primary {
background: linear-gradient(180deg, #CDF9BE 0%, #AFF497 100%);
border: 1px solid #B0DCCC;
border-radius: 8px;
color: #1B8700;
}
.gradio-container.dark button#submit-button {
background: linear-gradient(180deg, #CDF9BE 0%, #AFF497 100%);
border: 1px solid #B0DCCC;
border-radius: 8px;
color: #1B8700
}
table.gr-samples-table tr td {
border: none;
outline: none;
}
table.gr-samples-table tr td:first-of-type {
width: 0%;
}
div#short-upload-box div.absolute {
display: none !important;
}
gradio-app > div > div > div > div.w-full > div, .gradio-app > div > div > div > div.w-full > div {
gap: 0px 2%;
}
gradio-app div div div div.w-full, .gradio-app div div div div.w-full {
gap: 0px;
}
gradio-app h2, .gradio-app h2 {
padding-top: 10px;
}
#answer {
overflow-y: scroll;
color: white;
background: #666;
border-color: #666;
font-size: 20px;
font-weight: bold;
}
#answer span {
color: white;
}
#answer textarea {
color:white;
background: #777;
border-color: #777;
font-size: 18px;
}
#url-error input {
color: red;
}
"""
# UI Design and Logic
with gr.Blocks(css=CSS, title="Marketing Email Generator") as demo:
    gr.HTML("<h1 style='text-align: center;'>Marketing Email Generator</h1>")
    gr.Markdown("Drop your Excel file here 👇 and ask your query about it!")
    with gr.Row():
        with gr.Column(scale=3):
            upload = gr.File(label="Upload your Excel (.xlsx) file only", type="filepath")
            with gr.Row():
                clear_button = gr.Button("Clear", variant="secondary")
        with gr.Column(scale=6):
            chatbot = gr.Chatbot()
            with gr.Row():
                with gr.Column(scale=8):
                    question = gr.Textbox(
                        show_label=False,
                        placeholder="Type your query here after uploading the Excel file...",
                    )
                with gr.Column(scale=1, min_width=60):
                    submit_button = gr.Button("Ask me 🤖", variant="primary")
    # Wire the events: build the index on upload, answer on submit/click, reset on clear
    upload.change(fn=build_the_bot,
                  inputs=[upload],
                  outputs=[question, clear_button, submit_button, chatbot],
                  api_name="upload")
    question.submit(chat, [question, chatbot], [question, chatbot])
    submit_button.click(chat, [question, chatbot], [question, chatbot])
    clear_button.click(fn=clear_everything, inputs=[],
                       outputs=[upload, question, chatbot],
                       api_name="clear")
if __name__ == "__main__":
    demo.launch(share=True, debug=True)