File size: 2,449 Bytes
e763e8a
 
 
0d179e3
 
11a9727
 
 
 
 
156af69
e763e8a
156af69
e763e8a
 
 
 
55c7d01
4e308cb
e763e8a
0d179e3
156af69
fed1aac
 
 
 
 
d41ae8b
 
fed1aac
 
 
 
 
 
 
156af69
 
88d9519
93d3a3d
 
d9f4fdd
fd284aa
93d3a3d
9f19633
43ce7b5
f30d8c7
93d3a3d
9538882
 
5df36a8
bd0797d
01e50b7
bd0797d
 
f29db00
1c2b3eb
bd0797d
6cea44b
1c2b3eb
bd0797d
 
419bcbf
9098a3a
 
946f9bf
1c2b3eb
b47e796
a148c7b
4e308cb
4e8b18f
6a868af
 
50d6f71
5df36a8
4d3885f
0d179e3
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
from omegaconf import OmegaConf
from query import VectaraQuery
import os
import gradio as gr

def isTrue(x) -> bool:
    """Coerce *x* to a boolean.

    A real bool is returned unchanged; anything else is treated as a
    string and counts as True only when it equals 'true'
    (case-insensitive, surrounding whitespace ignored).
    """
    return x if isinstance(x, bool) else x.strip().lower() == 'true'

# Assemble runtime configuration from environment variables.
# 'corpus_keys', 'api_key', 'title' and 'source_data_desc' are required
# (KeyError if absent); 'streaming', 'prompt_name' and 'examples' are optional.
env = os.environ
corpus_keys = str(env['corpus_keys']).split(',')
cfg = OmegaConf.create({
    'corpus_keys': corpus_keys,
    'api_key': str(env['api_key']),
    'title': env['title'],
    'source_data_desc': env['source_data_desc'],
    'streaming': isTrue(env.get('streaming', False)),
    'prompt_name': env.get('prompt_name', None),
    'examples': env.get('examples', None),
})

# Single query client shared by every chat request.
vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)

def respond(message, history):
    """Chat handler for gr.ChatInterface: generator yielding the reply.

    In streaming mode the growing answer is re-yielded after each chunk so
    the UI updates incrementally; otherwise the full answer is yielded once.
    *history* is accepted for the ChatInterface contract but unused.
    """
    if not cfg.streaming:
        # Non-streaming: one round trip, one yield.
        yield vq.submit_query(message)
        return

    # Streaming: accumulate chunks and emit the partial answer each time.
    accumulated = ""
    for chunk in vq.submit_query_streaming(message):
        accumulated += chunk
        yield accumulated

# HTML banner rendered above the chat: assistant title plus Vectara logo in
# the first row, a one-line description of the data source in the second.
# cfg.title and cfg.source_data_desc are interpolated via the f-string.
heading_html = f'''
                <table>
                  <tr>
                      <td style="width: 80%; text-align: left; vertical-align: middle;"> <h1>Vectara AI Assistant: {cfg.title}</h1> </td>
                      <td style="width: 20%; text-align: right; vertical-align: middle;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
                  </tr>
                  <tr>
                      <td colspan="2" style="font-size: 16px;">This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a> to ask questions about {cfg.source_data_desc}.</td>
                  </tr>
                </table>
                '''

bot_css = """
table {
  border: none;
  width: 100%;
  table-layout: fixed;
  border-collapse: separate;
}
td {
  vertical-align: middle;
  border: none;
}
img {
  width: 75%;
}
h1 {
  font-size: 2em; /* Adjust the size as needed */
}
"""

# Optional comma-separated example prompts from the config; None disables
# the examples row in the UI.
app_examples = (
    [example.strip() for example in cfg.examples.split(",")]
    if cfg.examples
    else None
)

# Wire up the Gradio chat UI: banner, CSS, greeting bubble, and the
# optional example prompts (never cached, so each run hits the API fresh).
demo = gr.ChatInterface(
    respond,
    description=heading_html,
    css=bot_css,
    chatbot=gr.Chatbot(value=[[None, "How may I help you?"]]),
    examples=app_examples,
    cache_examples=False,
)

if __name__ == "__main__":
    demo.launch()