import os
import time

import streamlit as st
from langchain_community.document_loaders import TextLoader
from langchain_community.vectorstores import FAISS
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_together import ChatTogether, TogetherEmbeddings
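
# Third-party dependencies (one plausible install command; package names
# assume the current split LangChain distribution):
#   pip install streamlit langchain-together langchain-community \
#       langchain-core langchain-text-splitters faiss-cpu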

# Read the Together AI key from the environment instead of hardcoding a
# secret in source control (set it first: export TOGETHER_API_KEY="<your key>").
if "TOGETHER_API_KEY" not in os.environ:
    st.error("Please set the TOGETHER_API_KEY environment variable.")
    st.stop()

# Load the resume text and split it into chunks small enough to retrieve.
loader = TextLoader("Resume_data.txt")
documents = loader.load()

text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
docs = text_splitter.split_documents(documents)

# Embed the chunks with Together's retrieval model and index them in FAISS.
vectorstore = FAISS.from_documents(
    docs,
    TogetherEmbeddings(model="togethercomputer/m2-bert-80M-8k-retrieval"),
)

retriever = vectorstore.as_retriever()
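# as_retriever() defaults to similarity search returning (by LangChain's
# default) the top 4 chunks; pass search_kwargs={"k": ...} to tune how much
# context reaches the prompt.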

# Chat model served through the Together AI API.
model = ChatTogether(
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B",
    temperature=0.0,
    max_tokens=500,
)

# Prompt that grounds the model's answer in the retrieved resume context.
prompt_template = ChatPromptTemplate.from_messages([
    (
        "system",
        "You are an assistant for question-answering tasks. Use the following "
        "pieces of retrieved context to answer the question. If you don't know "
        "the answer, just say that you don't know. Answer in the first person, "
        "as if the person described in the context were responding, and if the "
        "user greets you, greet them back.",
    ),
    ("user", "Context: {context}\nQuestion: {question}"),
])

# RAG chain: retrieve context for the question, fill in the prompt, call the
# model, and parse the reply down to a plain string.
chain = (
    {"context": retriever, "question": RunnablePassthrough()}
    | prompt_template
    | model
    | StrOutputParser()
)
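# Quick sanity check outside Streamlit (hypothetical question shown):
#   chain.invoke("What are your main skills?")  # -> answer as a plain string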

st.title("Chat with me")

# Keep the chat history in the session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("What is up?"):
    # Show the user's message and record it in the history.
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    def response_generator():
        # Stream the answer chunk by chunk, pausing briefly between chunks
        # for a typing effect. The walrus guard above guarantees prompt is a
        # non-empty string here, so the fallback is only a safety net.
        if prompt:
            for chunk in chain.stream(prompt):
                yield chunk
                time.sleep(0.05)
        else:
            yield "How can I help you?"

    # Stream the assistant's reply into the UI and keep the assembled text.
    with st.chat_message("assistant"):
        response = st.write_stream(response_generator())
    st.session_state.messages.append({"role": "assistant", "content": response})
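
# To try the app locally (assuming this file is saved as app.py):
#   streamlit run app.py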