from omegaconf import OmegaConf
from query import VectaraQuery
import os
import streamlit as st
from streamlit_pills import pills
from streamlit_feedback import streamlit_feedback
from langdetect import detect_langs
from langdetect import DetectorFactory
from langcodes import Language
from PIL import Image
max_examples = 4
languages = {'English': 'en', 'Spanish': 'es', 'French': 'fr', 'German': 'de', 'Arabic': 'ar', 'Chinese': 'zh-cn',
             'Hebrew': 'he', 'Hindi': 'hi', 'Italian': 'it', 'Japanese': 'ja', 'Korean': 'ko', 'Portuguese': 'pt'}
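# Configuration is supplied through environment variables read in launch_bot() below:
# 'corpus_keys' (comma-separated), 'api_key', 'title', and 'source_data_desc' are required;
# 'streaming', 'prompt_name', and 'examples' (comma-separated example questions, of which at
# most max_examples are shown) are optional.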
def isTrue(x) -> bool:
    if isinstance(x, bool):
        return x
    return x.strip().lower() == 'true'
def identifyLanguage(response):
    lang_code = detect_langs(response)[0].lang
    return Language.make(language=lang_code).display_name()
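# Note: langdetect is not deterministic by default; setting DetectorFactory.seed (imported
# above) to a fixed value, e.g. DetectorFactory.seed = 0, makes repeated detections
# reproducible. The seed is left unset here.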
def thumbs_feedback(feedback, **kwargs):
    print(f'Debug: Feedback Received {feedback["score"]} '
          f'FROM user question {kwargs.get("prompt", "No user input")} '
          f'AND chat response {kwargs.get("response", "No chat response")}. '
          f'Detected response language {kwargs.get("language", "unknown")}')
    st.session_state.feedback_key += 1

if "feedback_key" not in st.session_state:
    st.session_state.feedback_key = 0
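# feedback_key is used as the Streamlit widget key for the thumbs feedback component below;
# incrementing it in thumbs_feedback() gives the next assistant response a fresh widget.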
def launch_bot():
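    # The helpers below assume that VectaraQuery.submit_query returns the full answer text and
    # that submit_query_streaming returns an iterable of text chunks that st.write_stream can
    # consume; the exact behavior is defined in query.py, which is not part of this file.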
    def generate_response(question):
        response = vq.submit_query(question)
        return response

    def generate_streaming_response(question):
        response = vq.submit_query_streaming(question)
        return response
    def show_example_questions():
        if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:
            selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
            if selected_example:
                st.session_state.ex_prompt = selected_example
                st.session_state.first_turn = False
                return True
        return False
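    # show_example_questions() renders the example questions as clickable pills only on the
    # first turn; picking one stores it in st.session_state.ex_prompt so it is handled exactly
    # like a typed question further down.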
    if 'cfg' not in st.session_state:
        corpus_keys = str(os.environ['corpus_keys']).split(',')
        cfg = OmegaConf.create({
            'corpus_keys': corpus_keys,
            'api_key': str(os.environ['api_key']),
            'title': os.environ['title'],
            'source_data_desc': os.environ['source_data_desc'],
            'streaming': isTrue(os.environ.get('streaming', False)),
            'prompt_name': os.environ.get('prompt_name', None),
            'examples': os.environ.get('examples', None),
            'language': 'English'
        })
        st.session_state.cfg = cfg
        st.session_state.ex_prompt = None
        st.session_state.first_turn = True
        st.session_state.language = cfg.language
        # Guard against a missing 'examples' variable, which would otherwise make split() fail.
        example_messages = [example.strip() for example in cfg.examples.split(",")] if cfg.examples else []
        st.session_state.example_messages = [em for em in example_messages if len(em) > 0][:max_examples]
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)
    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title=cfg.title, layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=175)
        st.markdown(f"## About\n\n"
                    f"This demo uses Retrieval Augmented Generation to answer questions about {cfg.source_data_desc}.\n")

        cfg.language = st.selectbox('Language:', languages.keys())
        if st.session_state.language != cfg.language:
            st.session_state.language = cfg.language
            st.rerun()

        st.markdown("\n")
        st.markdown("---")
        st.markdown(
            "## How does this work?\n"
            "This app was built with [Vectara](https://vectara.com).\n"
            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
            "This app uses Vectara's [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and present the answer to your question.\n\n"
        )
        st.markdown("---")

    st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)
    if "messages" not in st.session_state.keys():
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]

    # Display chat messages
    for message in st.session_state.messages:
        with st.chat_message(message["role"]):
            st.write(message["content"])

    example_container = st.empty()
    with example_container:
        if show_example_questions():
            example_container.empty()
            st.rerun()
    # select prompt from example question or user-provided input
    if st.session_state.ex_prompt:
        prompt = st.session_state.ex_prompt
    else:
        prompt = st.chat_input()
    if prompt:
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)
        st.session_state.ex_prompt = None
    # Generate a new response if the last message is not from the assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
            if cfg.streaming:
                stream = generate_streaming_response(prompt)
                response = st.write_stream(stream)
            else:
                with st.spinner("Thinking..."):
                    response = generate_response(prompt)
                    st.write(response)
            message = {"role": "assistant", "content": response}
            st.session_state.messages.append(message)
            st.rerun()
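    # Offer thumbs up/down feedback once the assistant has produced a real answer (skip the
    # initial greeting); the question, the answer, and its detected language are forwarded to
    # thumbs_feedback() via kwargs.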
    if (st.session_state.messages[-1]["role"] == "assistant") and (st.session_state.messages[-1]["content"] != "How may I help you?"):
        streamlit_feedback(feedback_type="thumbs",
                           on_submit=thumbs_feedback,
                           key=st.session_state.feedback_key,
                           kwargs={"prompt": st.session_state.messages[-2]["content"],
                                   "response": st.session_state.messages[-1]["content"],
                                   "language": identifyLanguage(st.session_state.messages[-1]["content"])})
if __name__ == "__main__":
    launch_bot()
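
# A rough sketch of how to run this locally (assuming the file is saved as app.py and that
# query.py, which defines VectaraQuery, sits next to it):
#
#   export api_key=...             # Vectara API key
#   export corpus_keys=...         # comma-separated Vectara corpus keys
#   export title="My assistant"
#   export source_data_desc="my documents"
#   export examples="What is covered?, How do I get started?"   # optional
#   export streaming=true                                        # optional
#   streamlit run app.py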