import streamlit as st
from huggingface_hub import InferenceClient
import fitz # PyMuPDF
import os
st.title("ChatGPT-like Chatbot")
base_url = "https://api-inference.huggingface.co/models/"
API_KEY = os.environ.get('HUGGINGFACE_API_KEY')
headers = {"Authorization": "Bearer " + str(API_KEY)}
model_links = {
    "Mistral-7B": base_url + "mistralai/Mistral-7B-Instruct-v0.2",
}
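# Additional endpoints could be registered in model_links, e.g. (hypothetical entry):
# "Zephyr-7B": base_url + "HuggingFaceH4/zephyr-7b-beta",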
model_info = {
    "Mistral-7B": {
        # 'description': "Good Model",
        # 'logo': 'model.jpg'
    }
}
def format_prompt(message, custom_instructions=None):
    """Wrap the message (and optional instructions) in Mistral's [INST] tags."""
    prompt = ""
    if custom_instructions:
        prompt += f"[INST] {custom_instructions} [/INST]"
    prompt += f"[INST] {message} [/INST]"
    return prompt
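
# Example of the string this produces (hypothetical inputs):
# format_prompt("What is in the PDF?", "Act like a Human in conversation")
# -> "[INST] Act like a Human in conversation [/INST][INST] What is in the PDF? [/INST]"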
def reset_conversation():
    """Clear the chat history kept in session state."""
    st.session_state.conversation = []
    st.session_state.messages = []
def read_pdf(file):
    """Extract the text of every page in an uploaded PDF.

    Streamlit's file uploader returns an in-memory UploadedFile, not a path,
    so the document must be opened from its bytes.
    """
    pdf_document = fitz.open(stream=file.read(), filetype="pdf")
    text = ""
    for page in pdf_document:
        text += page.get_text()
    return text
models = list(model_links.keys())
# Create the sidebar with the dropdown for model selection
selected_model = st.sidebar.selectbox("Select Model", models)
# Create a temperature slider (the Inference API requires a strictly positive
# temperature, so the range starts just above zero)
temp_values = st.sidebar.slider('Select a temperature value', 0.1, 1.0, 0.5)
# Add reset button to clear conversation
st.sidebar.button('Reset Chat', on_click=reset_conversation)
# Create model description
st.sidebar.write(f"You're now chatting with {selected_model}")
#st.sidebar.markdown(model_info[selected_model]['description'])
#st.sidebar.image(model_info[selected_model]['logo'])
st.sidebar.markdown("Generated content may be inaccurate or false.")
st.sidebar.markdown("\nLearn how to build this chatbot here.")
if "prev_option" not in st.session_state:
st.session_state.prev_option = selected_model
if st.session_state.prev_option != selected_model:
st.session_state.messages = []
st.session_state.prev_option = selected_model
reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]
st.subheader(f'AI - {selected_model}')
st.title(f'ChatBot Using {selected_model}')
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Upload PDF
uploaded_file = st.file_uploader("Choose a PDF file", type="pdf")
if uploaded_file is not None:
    pdf_text = read_pdf(uploaded_file)
    st.session_state.pdf_text = pdf_text
    st.write("PDF content loaded successfully!")
# Accept user input
if prompt := st.chat_input(f"Hi, I'm {selected_model}. Ask me a question"):
    custom_instruction = "Act like a Human in conversation"

    # Prepend the uploaded PDF's text as context for the question
    if "pdf_text" in st.session_state:
        prompt = f"{st.session_state.pdf_text}\n\nQuestion: {prompt}"

    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    formatted_text = format_prompt(prompt, custom_instruction)

    # Display assistant response in chat message container
    with st.chat_message("assistant"):
        client = InferenceClient(model=repo_id, headers=headers)
        output = client.text_generation(
            formatted_text,
            temperature=temp_values,
            max_new_tokens=3000,
            stream=True,
        )
        # st.write_stream renders tokens as they arrive and returns
        # the full concatenated response
        response = st.write_stream(output)
    st.session_state.messages.append({"role": "assistant", "content": response})