update app.py
app.py CHANGED
@@ -12,7 +12,7 @@ from langchain.retrievers.document_compressors import LLMChainExtractor
 from langchain.embeddings import HuggingFaceBgeEmbeddings
 from langchain.llms import HuggingFacePipeline
 from langchain.vectorstores import Chroma
-
+from templates import all_templates
 
 
 @st.cache_resource(show_spinner=False)
@@ -258,49 +258,10 @@ if __name__=="__main__":
     bar.progress(0.5, "Loading Model. Please wait.")
     model, tokenizer = load_model(model_name)
     bar.progress(0.9, "Ready to ask? Go ahead and type your question.")
-    time.sleep(2)
-    bar.empty()
-
-    all_templates = { "llama_prompt_template" : """<s>[INST]\n<<SYS>>\nYou are a stoic teacher that provide guidance and advice inspired by Stoic philosophy on navigating life's challenges with resilience and inner peace. Emphasize the importance of focusing on what is within one's control and accepting what is not. Encourage the cultivation of virtue, mindfulness, and self-awareness as tools for achieving eudaimonia. Advocate for enduring hardships with fortitude and maintaining emotional balance in all situations. Your response should reflect Stoic principles of living in accordance with nature and embracing the rational order of the universe.
-You should guide the reader towards a fulfilling life focused on virtue rather than external things because living in accordance with virtue leads to eudaimonia or flourishing.\n\n
-Give a precise answer to the question based on the context. Don't be verbose.\n\n
-context:
-{context}\n<</SYS>>\n\n
-question:
-{user_query}
-[/INST]""",
-
-    "llama_rag_template" :"""<s>[INST]\n<<SYS>>\nGiven the following question and context, summarize the parts that are relevant to answer the question. If none of the context is relevant return NO_OUTPUT.\n\n>
-- Do not mention quotes.\n\n
-- Reply using a single sentence.\n\n
-> Context:\n
->>>\n{context}\n>>>\n<</SYS>>\n\n
-Question: {question}\n
-[/INST]
-The relevant parts of the context are:
-""",
-
-    "prompt_template":"""You are a stoic teacher that provide guidance and advice inspired by Stoic philosophy on navigating life's challenges with resilience and inner peace. Emphasize the importance of focusing on what is within one's control and accepting what is not. Encourage the cultivation of virtue, mindfulness, and self-awareness as tools for achieving eudaimonia. Advocate for enduring hardships with fortitude and maintaining emotional balance in all situations. Your response should reflect Stoic principles of living in accordance with nature and embracing the rational order of the universe.
-You should guide the reader towards a fulfilling life focused on virtue rather than external things because living in accordance with virtue leads to eudaimonia or flourishing.
-context:
-{context}
-
-question:
-{user_query}
-
-Answer:
-""",
-    "rag_prompt" : """Given the following question and context, summarize the parts that are relevant to answer the question. If none of the context is relevant return NO_OUTPUT.\n\n>
-- Do not mention quotes.\n\n>
-- Reply using a single sentence.\n\n>
-
-Question: {question}\n> Context:\n>>>\n{context}\n>>>\nRelevant parts"""}
-
-
-
+    #time.sleep(2)
+    #bar.empty()
 
     # streamlit chat
-
     user_question = st.chat_input('What do you want to ask ..')
 
     if user_question is not None and user_question!="":
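
The new `from templates import all_templates` line assumes a `templates.py` module sitting next to `app.py`; that file is not shown in this commit's diff. A minimal sketch of what it would contain, assuming the refactor simply relocates the dict deleted above (prompt bodies abridged with "..." here; the full strings are the ones in the removed lines):

# templates.py -- hypothetical module implied by the new import in app.py.
# A direct refactor would move the all_templates dict deleted from app.py
# here unchanged; only the long prompt texts are abridged in this sketch.

all_templates = {
    # Llama-2 chat format: system prompt wrapped in <<SYS>> inside [INST].
    "llama_prompt_template": """<s>[INST]\n<<SYS>>\nYou are a stoic teacher ...\n\ncontext:\n{context}\n<</SYS>>\n\nquestion:\n{user_query}\n[/INST]""",

    # Llama-2-formatted extraction prompt; returning NO_OUTPUT for irrelevant
    # context matches the convention used by LangChain's LLMChainExtractor,
    # which app.py already imports.
    "llama_rag_template": """<s>[INST]\n<<SYS>>\nGiven the following question and context, summarize the parts that are relevant ... return NO_OUTPUT.\n<</SYS>>\n\nQuestion: {question}\n[/INST]\nThe relevant parts of the context are:\n""",

    # Plain-text variants of the same two prompts for non-chat models.
    "prompt_template": """You are a stoic teacher ...\ncontext:\n{context}\n\nquestion:\n{user_query}\n\nAnswer:\n""",
    "rag_prompt": """Given the following question and context, summarize the parts that are relevant ... return NO_OUTPUT.\nQuestion: {question}\n> Context:\n>>>\n{context}\n>>>\nRelevant parts""",
}

Because the dict keeps its original name and keys, lookups in app.py such as all_templates["llama_prompt_template"] presumably keep working with no further edits. The other change just comments out time.sleep(2) and bar.empty(), so the progress bar now stays visible instead of being cleared two seconds after the model loads.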