from omegaconf import OmegaConf
from query import VectaraQuery
import os

import streamlit as st
from streamlit_pills import pills

from PIL import Image
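
# Configuration is read from environment variables. Illustrative example
# (placeholder values, not real credentials):
#
#   corpus_keys="corpus_1,corpus_2"       # comma-separated Vectara corpus keys (required)
#   api_key="<your-vectara-api-key>"      # required
#   title="My Assistant"                  # required
#   source_data_desc="the product docs"   # required
#   streaming="True"                      # optional, defaults to False
#   prompt_name="<custom-prompt-name>"    # optional
#   examples="What is X?,How do I Y?"     # optional, comma-separated example questions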

max_examples = 4

def isTrue(x) -> bool:
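    """Interpret a boolean, or an environment-variable string, as a boolean."""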
    if isinstance(x, bool):
        return x
    return x.strip().lower() == 'true'

def like_button_clicked():
    # Callback for the thumbs-up button. The original definition was missing
    # from this file; this minimal stub just records the feedback in session state.
    st.session_state.feedback = "thumbs_up"

def launch_bot():
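    """Configure the app from environment variables and run the chat UI."""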
    def generate_response(question):
        response = vq.submit_query(question)
        return response
    
    def generate_streaming_response(question):
        response = vq.submit_query_streaming(question)
        return response
    
    def show_example_questions():        
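        """Render example-question pills on the first turn; return True if one was picked."""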
        if len(st.session_state.example_messages) > 0 and st.session_state.first_turn:            
            selected_example = pills("Queries to Try:", st.session_state.example_messages, index=None)
            if selected_example:
                st.session_state.ex_prompt = selected_example
                st.session_state.first_turn = False
                return True
        return False

    if 'cfg' not in st.session_state:
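        # First run in this session: build the config once from environment variables.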
        corpus_keys = str(os.environ['corpus_keys']).split(',')
        cfg = OmegaConf.create({
            'corpus_keys': corpus_keys,
            'api_key': str(os.environ['api_key']),
            'title': os.environ['title'],
            'source_data_desc': os.environ['source_data_desc'],
            'streaming': isTrue(os.environ.get('streaming', False)),
            'prompt_name': os.environ.get('prompt_name', None),
            'examples': os.environ.get('examples', None)
        })
        st.session_state.cfg = cfg
        st.session_state.ex_prompt = None
        st.session_state.first_turn = True        
        # Guard against a missing 'examples' env var (cfg.examples would be None).
        example_messages = [example.strip() for example in cfg.examples.split(",")] if cfg.examples else []
        st.session_state.example_messages = [em for em in example_messages if len(em) > 0][:max_examples]
        
        st.session_state.vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)

    cfg = st.session_state.cfg
    vq = st.session_state.vq
    st.set_page_config(page_title=cfg.title, layout="wide")

    # left side content
    with st.sidebar:
        image = Image.open('Vectara-logo.png')
        st.image(image, width=175)
        st.markdown(f"## About\n\n"
                    f"This demo uses Retrieval Augmented Generation (RAG) to answer questions about {cfg.source_data_desc}.\n\n")

        st.markdown("---")
        st.markdown(
            "## How does this work?\n"
            "This app was built with [Vectara](https://vectara.com).\n"
            "Vectara's [Indexing API](https://docs.vectara.com/docs/api-reference/indexing-apis/indexing) was used to ingest the data into a Vectara corpus (or index).\n\n"
            "This app uses Vectara's [Chat API](https://docs.vectara.com/docs/console-ui/vectara-chat-overview) to query the corpus and answer your questions.\n\n"
        )
        st.markdown("---")
        

    st.markdown(f"<center> <h2> Vectara AI Assistant: {cfg.title} </h2> </center>", unsafe_allow_html=True)

    if "messages" not in st.session_state:
        st.session_state.messages = [{"role": "assistant", "content": "How may I help you?"}]
                
    # Display chat messages
    for i, message in enumerate(st.session_state.messages):
        with st.chat_message(message["role"]):
            st.write(message["content"])

        if message["role"] == "assistant" and not st.session_state.first_turn:
            # A unique key per message avoids Streamlit's duplicate-widget error.
            if st.button(":thumbsup:", key=f"thumbs_up_{i}", on_click=like_button_clicked):
                print("DEBUG: Thumbs up pressed")

    example_container = st.empty()
    with example_container:
        if show_example_questions():
            example_container.empty()
            st.rerun()

    # select prompt from example question or user provided input
    if st.session_state.ex_prompt:
        prompt = st.session_state.ex_prompt
    else:
        prompt = st.chat_input()
    if prompt:
        st.session_state.messages.append({"role": "user", "content": prompt})
        with st.chat_message("user"):
            st.write(prompt)
        st.session_state.ex_prompt = None
        
    # Generate a new response if last message is not from assistant
    if st.session_state.messages[-1]["role"] != "assistant":
        with st.chat_message("assistant"):
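            # Either stream tokens as they arrive or block on the full response.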
            if cfg.streaming:
                stream = generate_streaming_response(prompt) 
                response = st.write_stream(stream) 
            else:
                with st.spinner("Thinking..."):
                    response = generate_response(prompt)
                    st.write(response)
            message = {"role": "assistant", "content": response}
            st.session_state.messages.append(message)
            st.rerun()
    
if __name__ == "__main__":
    launch_bot()
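
# To run locally (assuming this file is saved as app.py and the environment
# variables documented at the top are set):
#
#   streamlit run app.py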