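"""Gradio chat app that answers user questions with Vectara RAG queries.

Configuration (corpus keys, API key, UI text, streaming mode) is read from
environment variables.
"""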
from omegaconf import OmegaConf
from query import VectaraQuery
import os
import gradio as gr

def isTrue(x) -> bool:
    """Interpret a bool, or a string such as 'True'/'false', as a boolean."""
    if isinstance(x, bool):
        return x
    return x.strip().lower() == 'true'

corpus_keys = str(os.environ['corpus_keys']).split(',')

cfg = OmegaConf.create({
    'corpus_keys': corpus_keys,
    'api_key': str(os.environ['api_key']),
    'title': os.environ['title'],
    'description': os.environ['description'],
    'source_data_desc': os.environ['source_data_desc'],
    'streaming': isTrue(os.environ.get('streaming', False)),
    'prompt_name': os.environ.get('prompt_name', None),
    'examples': os.environ.get('examples', None)
})
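
# Query client for the configured Vectara corpora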
vq = VectaraQuery(cfg.api_key, cfg.corpus_keys, cfg.prompt_name)

def respond(message, history):
    if cfg.streaming:
        # Stream the response and yield the accumulated output as it arrives
        stream = vq.submit_query_streaming(message)
        outputs = ""
        for output in stream:
            outputs += output
            yield outputs
    else:
        # Submit a non-streaming query and return the full response at once
        response = vq.submit_query(message)
        yield response

# cfg.description = f'''
# <table>
# <tr>
# <td style="width: 33%; vertical-align: top;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
# <td style="width: 34%; vertical-align: middle;"> <h1>{cfg.title}</h1> </td>
# <td style="width: 33%; vertical-align: bottom; text-align: left"> This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a><br>to ask questions about {cfg.source_data_desc}. </td>
# </tr>
# </table>
# <center> <h2>{cfg.description}</h2></center>
# '''

cfg.description = f'''
<table>
  <tr>
    <td style="width: 50%; text-align: left;"> <h1>{cfg.title}</h1> </td>
    <td style="width: 50%; text-align: right;"> <img src="https://github.com/david-oplatka/chatbot-streamlit/blob/main/Vectara-logo.png?raw=true"> </td>
  </tr>
  <tr>
    <td colspan="2">This demo uses Retrieval Augmented Generation from <a href="https://vectara.com/">Vectara</a> to ask questions about {cfg.source_data_desc}.</td>
  </tr>
</table>
<p style="text-indent: 20px;"></p>
'''
css = """
table {
border: none;
width: 100%;
table-layout: fixed;
border-collapse: separate;
}
td {
vertical-align: top;
border: none;
}
img {
width: 25%;
}
h1 {
font-size: 3em; /* Adjust the size as needed */
}
"""

if cfg.examples:
    app_examples = [example.strip() for example in cfg.examples.split(",")]
else:
    app_examples = None

demo = gr.ChatInterface(respond, description=cfg.description, css=css,
                        chatbot=gr.Chatbot(value=[[None, "How may I help you?"]]),
                        examples=app_examples, cache_examples=False)

if __name__ == "__main__":
    demo.launch()