import os
import time
import base64
import logging
import torch
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.llms import HuggingFacePipeline
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.vectorstores import Chroma
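# High-level flow: load a Chroma vector store and a chat model once (both cached by
# Streamlit), retrieve and optionally compress context for each user query, then run
# an LLMChain over a prompt that combines the query with that context.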
@st.cache_resource
def load_model(model_name, logger):
    """Load the generation model: a quantized GGML Llama 2 on CPU or a 4-bit Mistral on GPU."""
    logger.info("Loading model ..")
    start_time = time.time()
    if model_name == 'llama':
        from langchain.llms import CTransformers
        model = CTransformers(model="TheBloke/Llama-2-7B-Chat-GGML",
                              model_file='llama-2-7b-chat.ggmlv3.q2_K.bin',
                              model_type='llama', gpu_layers=0,
                              config={"context_length": 2048})
        tokenizer = None
    elif model_name == 'mistral':
        from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
        model_id = "filipealmeida/Mistral-7B-Instruct-v0.1-sharded"
        quant_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_use_double_quant=True,
            bnb_4bit_compute_dtype=torch.bfloat16)
        model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True,
                                                     quantization_config=quant_config, device_map="auto")
        tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
        tokenizer.pad_token = tokenizer.eos_token
    logger.info(f"Model Loading Time : {time.time() - start_time:.2f}s.")
    return model, tokenizer
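# @st.cache_resource keeps one model instance alive across Streamlit reruns, so the
# slow load above happens only once per process. A hypothetical standalone call:
#   model, tokenizer = load_model("llama", logging.getLogger(__name__))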
@st.cache_resource
def load_db(logger, device, local_embed=False, CHROMA_PATH='./ChromaDB'):
"""
Load vector embeddings and Chroma database
"""
encode_kwargs = {'normalize_embeddings': True}
embed_id = "BAAI/bge-large-en-v1.5"
start_time = time.time()
#TODO : LOOK INTO LOADING ONLY A SINGLE FILE FROM HF REPO TO REDUCE MEMORY
    if local_embed:
        # TODO : load only pytorch bin file
        PATH_TO_EMBEDDING_FOLDER = ""  # path to a local copy of the BGE model; must be filled in
        embeddings = HuggingFaceBgeEmbeddings(model_name=PATH_TO_EMBEDDING_FOLDER,
                                              model_kwargs={"device": device, "trust_remote_code": True},
                                              encode_kwargs=encode_kwargs)
        logger.info('Loading embeddings locally.')
        # Sanity-check the local embeddings
        embed = embeddings.embed_query("Hello World!")
        print(len(embed))
        print(embed[:5])
else:
embeddings = HuggingFaceBgeEmbeddings(model_name=embed_id , model_kwargs={"device": device}, encode_kwargs=encode_kwargs)
logger.info('Loading embeddings from Hub.')
db = Chroma(persist_directory=CHROMA_PATH, embedding_function=embeddings)
logger.info(f"Vector Embeddings and Chroma Database Loading Time : {time.time() - start_time} .")
return db
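# A minimal sketch of querying the store directly, outside the app flow (the query
# string is made up for illustration):
#   db = load_db(logger, device)
#   hits = db.similarity_search("What is within our control?", k=2)
#   print(hits[0].page_content[:200])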
def wrap_model(model, tokenizer):
    """Wrap a transformers text-generation pipeline with HuggingFacePipeline so LangChain can drive it."""
    from transformers import pipeline
    text_generation_pipeline = pipeline(
        model=model,
        tokenizer=tokenizer,
        task="text-generation",
        temperature=0.2,
        repetition_penalty=1.1,
        #return_full_text=True,
        max_new_tokens=1000,
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True)
    HF_pipeline = HuggingFacePipeline(pipeline=text_generation_pipeline)
    return HF_pipeline
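# The wrapped pipeline behaves like any LangChain LLM, e.g. (illustrative only):
#   HF_pipeline = wrap_model(model, tokenizer)
#   answer = HF_pipeline("Summarize Stoicism in one sentence.")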
def fetch_context(db, model, model_name, query, logger, template, use_compressor=True):
    """
    Perform a similarity search and retrieve context related to the query.
    The database stores large documents, so a compressor is applied to the set of
    retrieved documents to make sure the compressed context it returns is relevant
    to the query.
    """
    if use_compressor:
        if model_name == 'llama':
            compressor = LLMChainExtractor.from_llm(model)
            compressor.llm_chain.prompt.template = template['llama_rag_prompt']
        elif model_name == 'mistral':
            global HF_pipeline_model
            HF_pipeline_model = wrap_model(model, tokenizer)  # tokenizer is module-level, set in __main__
            compressor = LLMChainExtractor.from_llm(HF_pipeline_model)
            compressor.llm_chain.prompt.template = template['rag_prompt']
        retriever = db.as_retriever(search_type="mmr")
        compression_retriever = ContextualCompressionRetriever(base_compressor=compressor,
                                                               base_retriever=retriever)
        logger.info(f"User Query : {query}")
        compressed_docs = compression_retriever.get_relevant_documents(query)
        logger.info(f"Retrieved Compressed Docs : {compressed_docs}")
        return compressed_docs
    docs = db.max_marginal_relevance_search(query)
    logger.info(f"Retrieved Docs : {docs}")
    return docs
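# "mmr" (max marginal relevance) retrieval trades similarity to the query against
# diversity among the returned chunks, which helps when the store holds overlapping
# passages from the same source texts.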
def format_context(docs):
    """
    Clean retrieved chunks and join them into a single context string.
    """
    cleaned_docs = [doc for doc in docs if ">>>" not in doc.page_content]
    return "\n\n".join(doc.page_content for doc in cleaned_docs)
def llm_chain_with_context(model, model_name, query, context, template, logger):
    """
    Run a simple chain whose prompt combines the user query with the retrieved
    context, and let the underlying model generate a response.
    """
    formatted_context = format_context(context)
    # Give a precise answer to the question based on the context. Don't be verbose.
    if model_name == 'llama':
        prompt_template = PromptTemplate(input_variables=['context', 'user_query'], template=template['llama_prompt_template'])
        llm_chain = LLMChain(llm=model, prompt=prompt_template)
    elif model_name == 'mistral':
        prompt_template = PromptTemplate(input_variables=['context', 'user_query'], template=template['prompt_template'])
        llm_chain = LLMChain(llm=HF_pipeline_model, prompt=prompt_template)
    output = llm_chain.predict(user_query=query, context=formatted_context)
    return output
def generate_response(query, model, template, logger):
    start_time = time.time()
    my_bar = st.progress(0, text="Loading model. Please wait.")
    my_bar.progress(0.3, "Running RAG. Please wait.")
    context = fetch_context(db, model, model_name, query, logger, template)
    my_bar.progress(0.7, "Generating Answer. Please wait.")
    response = llm_chain_with_context(model, model_name, query, context, template, logger)
    logger.info(f"Total Execution Time: {time.time() - start_time}")
    my_bar.progress(0.9, "Post Processing. Please wait.")
    my_bar.progress(1.0, "Done")
    time.sleep(1)
    my_bar.empty()
    return response
# show background image
def convert_to_base64(bin_file):
with open(bin_file, 'rb') as f:
data = f.read()
return base64.b64encode(data).decode()
def set_as_background_img(png_file):
bin_str = convert_to_base64(png_file)
background_img = '''
    <link href='https://fonts.googleapis.com/css?family=Libre+Baskerville' rel='stylesheet'>
<style>
.stApp {
background-image: url("data:image/png;base64,%s");
background-size: cover;
background-repeat: no-repeat;
background-attachment: scroll;
}
</style>
''' % bin_str
st.markdown(background_img, unsafe_allow_html=True)
return
if __name__=="__main__":
st.set_page_config(page_title='StoicCyber', page_icon="🏛️", layout="centered", initial_sidebar_state="collapsed")
set_as_background_img('pxfuel.jpg')
# header
original_title = '<h1 style="font-family: Libre Baskerville; color:#faf8f8; font-size: 30px; text-align: left; ">STOIC Ω CYBER</h1>'
st.markdown(original_title, unsafe_allow_html=True)
user_question = st.chat_input('What do you want to ask ..')
# hide footer and header
hide_st_style = """
<style>
header {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_st_style, unsafe_allow_html=True)
# set logger
logger = logging.getLogger(__name__)
logging.basicConfig(
filename="app.log",
filemode="a",
format="%(asctime)s.%(msecs)03d %(levelname)s [%(funcName)s] %(message)s",
level=logging.INFO,
datefmt="%Y-%m-%d %H:%M:%S",)
# model to use in spaces depends on the available device
device = "cuda" if torch.cuda.is_available() else "cpu"
model_name = "llama" if device=="cpu" else "mistral"
logger.info(f'Running {model_name} model for inference on {device}')
    all_templates = { "llama_prompt_template" : """<s>[INST]\n<<SYS>>\nYou are a stoic teacher who provides guidance and advice inspired by Stoic philosophy on navigating life's challenges with resilience and inner peace. Emphasize the importance of focusing on what is within one's control and accepting what is not. Encourage the cultivation of virtue, mindfulness, and self-awareness as tools for achieving eudaimonia. Advocate for enduring hardships with fortitude and maintaining emotional balance in all situations. Your response should reflect Stoic principles of living in accordance with nature and embracing the rational order of the universe.
You should guide the reader towards a fulfilling life focused on virtue rather than external things because living in accordance with virtue leads to eudaimonia or flourishing.
context:
{context}\n<</SYS>>\n\n
question:
{user_query}
[/INST]""",
"llmaa_rag_prompt" :"""<s>[INST]\n<<SYS>>\nGiven the following question and context, summarize the parts that are relevant to answer the question. If none of the context is relevant return NO_OUTPUT.\n\n>
- Do not mention quotes.\n\n
- Reply using a single sentence.\n\n
> Context:\n
>>>\n{context}\n>>>\n<</SYS>>\n\n
Question: {question}\n
[/INST]
The relevant parts of the context are:
""",
"prompt_template":"""You are a stoic teacher that provide guidance and advice inspired by Stoic philosophy on navigating life's challenges with resilience and inner peace. Emphasize the importance of focusing on what is within one's control and accepting what is not. Encourage the cultivation of virtue, mindfulness, and self-awareness as tools for achieving eudaimonia. Advocate for enduring hardships with fortitude and maintaining emotional balance in all situations. Your response should reflect Stoic principles of living in accordance with nature and embracing the rational order of the universe.
You should guide the reader towards a fulfilling life focused on virtue rather than external things because living in accordance with virtue leads to eudaimonia or flourishing.
context:
{context}
question:
{user_query}
Answer:
""",
"rag_prompt" : """Given the following question and context, summarize the parts that are relevant to answer the question. If none of the context is relevant return NO_OUTPUT.\n\n>
- Do not mention quotes.\n\n>
- Reply using a single sentence.\n\n>
Question: {question}\n> Context:\n>>>\n{context}\n>>>\nRelevant parts"""}
db = load_db(logger, device)
model, tokenizer = load_model(model_name, logger)
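    # db, model_name and tokenizer stay module-level on purpose: the helper functions
    # above reach them as globals when generate_response is called.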
# streamlit chat
if user_question is not None and user_question!="":
with st.chat_message("Human", avatar="🧔🏻"):
st.write(user_question)
response = generate_response(user_question, model, all_templates, logger)
with st.chat_message("AI", avatar="🏛️"):
st.write(response)