Spaces:
Runtime error
Runtime error
Tao Wei
committed on
Commit
·
f26e192
1
Parent(s):
e7d64b7
Add application file
Browse files- app_chat.py +199 -0
- assets/bot.png +0 -0
- assets/openai.png +0 -0
- assets/user.png +0 -0
- llms.py +101 -0
- prompts.py +20 -0
app_chat.py
ADDED
@@ -0,0 +1,199 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# app.py : Multimodal Chatbot
|
2 |
+
import os
|
3 |
+
import time
|
4 |
+
import logging
|
5 |
+
from dotenv import load_dotenv
|
6 |
+
import gradio as gr
|
7 |
+
|
8 |
+
################################################################
# Load .env and logging
################################################################

load_dotenv()  # take environment variables from .env.

# Fix: the log format had an unbalanced bracket ('%(asctime)-15s]' with no
# opening '['); wrap the timestamp properly.
logging.basicConfig(level=logging.WARN,
                    format='[%(asctime)-15s] %(message)s',
                    datefmt="%m/%d/%Y %I:%M:%S %p %Z")
|
15 |
+
|
16 |
+
def print(*args, **kwargs):
    """Shadow the builtin print so messages go through logging instead of stdout.

    Logged at WARNING because gradio emits unwanted messages at INFO level.
    Only the ``sep`` keyword is honored; other print() kwargs (end, file,
    flush) are silently ignored.
    """
    sep = kwargs.get('sep', ' ')  # idiomatic: .get instead of membership test
    logging.warning(sep.join(str(val) for val in args))
|
19 |
+
|
20 |
+
################################################################
|
21 |
+
# Extra loading
|
22 |
+
################################################################
|
23 |
+
|
24 |
+
from llms import _openai_stream_bot_fn as _llm_call_stream
|
25 |
+
# from prompts import PROMPTS
# PROMPTS maps role command names to "awesome ChatGPT prompt" entries; kept
# empty here so the app does not download the prompts dataset at startup.
PROMPTS = {}

################################################################
# Global variables
################################################################

TITLE = "AI Chat"

DESCRIPTION = """
# AI Chat

Simply enter text and press ENTER in the textbox to interact with the chatbot.
"""

# Component specs: each value is the kwargs for a gradio class named by 'cls'
# (resolved via getattr(gr, cls) in _create_from_dict).
ATTACHMENTS = {
    'openai_api_key': dict(cls='Textbox', interactive=True, label="OpenAI API Key",
                           info='Please input your OpenAI API key here.'),
    'chat_role': dict(cls='Dropdown', choices=list(PROMPTS.keys()), value="<none>",
                      interactive=True, label='Awesome ChatGPT Prompt',
                      info='Set system prompt to act as the specified role.'),
    'system_prompt': dict(cls='Textbox', interactive=True, lines=5, label="System prompt"),
}

SETTINGS = {
    'chat_engine': dict(cls='Radio', choices=['auto', 'gpt-3.5-turbo-16k', 'gpt-4'],
                        value='auto',
                        interactive=True, label="Chat engine"),
}

PARAMETERS = {
    # Fix: corrected typos in the user-facing info text
    # ("temperator" / "hight temperate").
    'temperature': dict(cls='Slider', minimum=0, maximum=1, value=0.7, step=0.1,
                        interactive=True, label="Temperature",
                        info="Lower temperature for deterministic output; higher temperature for randomness"),
}

KWARGS = {}  # use for chatbot additional_inputs, do NOT change
|
61 |
+
|
62 |
+
################################################################
|
63 |
+
# utils
|
64 |
+
################################################################
|
65 |
+
|
66 |
+
def _create_from_dict(PARAMS, tabbed=False):
|
67 |
+
params = {}
|
68 |
+
for name, kwargs in PARAMS.items():
|
69 |
+
cls_ = kwargs['cls']; del kwargs['cls']
|
70 |
+
if not tabbed:
|
71 |
+
params[name] = getattr(gr, cls_)(**kwargs)
|
72 |
+
else:
|
73 |
+
tab_name = kwargs['label'] if 'label' in kwargs else name
|
74 |
+
with gr.Tab(tab_name):
|
75 |
+
params[name] = getattr(gr, cls_)(**kwargs)
|
76 |
+
return params
|
77 |
+
|
78 |
+
################################################################
|
79 |
+
# Bot fn
|
80 |
+
################################################################
|
81 |
+
|
82 |
+
|
83 |
+
def bot_fn(message, history, *args):
    """Chatbot callback for gr.ChatInterface; yields (streams) the reply.

    ``args`` are the additional_inputs values, in the same order as the
    module-level KWARGS mapping built in get_demo().
    """
    __TIC = time.time()
    kwargs = dict(zip(KWARGS.keys(), args))
    kwargs['verbose'] = True  # auto print llm calls

    # set param to default value if param is "auto"
    AUTOS = {'chat_engine': 'gpt-3.5-turbo-16k'}
    for param, default_value in AUTOS.items():
        # Fix: use .get() so a missing key no longer raises KeyError.
        if kwargs.get(param) == 'auto':
            kwargs[param] = default_value

    if 'openai_api_key' in kwargs:
        if kwargs['openai_api_key'].startswith('sk-'):
            os.environ['OPENAI_API_KEY'] = kwargs['openai_api_key']
        else:
            # anything not starting with 'sk-' is treated as a base64-encoded key
            import base64
            os.environ['OPENAI_API_KEY'] = base64.b64decode(kwargs['openai_api_key'].encode('ascii')).decode('ascii')

    if os.environ.get('OPENAI_API_KEY'):
        bot_message = _llm_call_stream(message, history, **kwargs)
    else:
        bot_message = f'**ERROR**: Please input your OpenAI API key and retry!\nYou can obtain an API key from https://platform.openai.com/account/api-keys'

    if isinstance(bot_message, str):
        yield bot_message
    else:
        # Fix: initialize m so an empty stream no longer raises NameError below.
        m = ''
        for m in bot_message:
            yield m
        bot_message = m

    # SECURITY fix: never log the API key along with the other kwargs.
    print({k: v for k, v in kwargs.items() if k != 'openai_api_key'})
    __TOC = time.time()
    print(f'Elapsed time: {__TOC-__TIC}')
|
115 |
+
|
116 |
+
################################################################
|
117 |
+
# Gradio app
|
118 |
+
################################################################
|
119 |
+
|
120 |
+
def get_demo():
    """Build and return the gradio Blocks demo.

    Layout: title, collapsible description, a narrow left column with
    attachments / settings / parameters, and a wide right column with the
    ChatInterface plus example prompts.

    Side effect: rebuilds the module-level KWARGS mapping that bot_fn uses
    to name its additional_inputs values.
    """

    # use css and elem_id to format
    css="""#chatbot {
    min-height: 600px;
    }"""
    with gr.Blocks(css=css) as demo:
        # title
        gr.HTML(f"<center><h1>{TITLE}</h1></center>")
        # description
        with gr.Accordion("Expand to see Introduction and Usage", open=False):
            gr.Markdown(f"{DESCRIPTION}")
        with gr.Row():
            # attachments, settings, and parameters
            with gr.Column(scale=1):
                attachments = _create_from_dict(ATTACHMENTS)
                with gr.Accordion("Settings", open=False) as settings_accordin:
                    settings = _create_from_dict(SETTINGS)
                with gr.Accordion("Parameters", open=False) as parameters_accordin:
                    parameters = _create_from_dict(PARAMETERS)

            with gr.Column(scale=9):
                # chatbot
                global KWARGS
                KWARGS = {**attachments, **settings, **parameters}
                # display-only components carry no input value, so drop them
                KWARGS = {k: v for k, v in KWARGS.items() if not isinstance(v, (gr.Markdown, gr.HTML, gr.JSON))}
                chatbot = gr.ChatInterface(bot_fn, # chatbot=_chatbot, textbox=_textbox,
                        additional_inputs=list(KWARGS.values()),
                        retry_btn="Retry", undo_btn="Undo", clear_btn="Clear",
                    )
                chatbot.chatbot.elem_id = 'chatbot' # for css
                chatbot.chatbot.avatar_images = ("assets/user.png", "assets/openai.png")

                # examples
                with gr.Accordion("Examples", open=False) as examples_accordin:
                    chat_examples = gr.Examples(
                            ["What's the Everett interpretation of quantum mechanics?",
                                'Give me a list of the top 10 dive sites you would recommend around the world.',
                                'Write a Python code to calculate Fibonacci numbers.'
                            ],
                            inputs=chatbot.textbox, label="AI Chat Examples",
                        )
        # additional handlers
        # (file-like attachments feed the chat textbox via the chatbot's upload hook)
        for name, attach in attachments.items():
            if hasattr(chatbot, '_upload_fn') and isinstance(attach, (gr.Image, gr.Audio, gr.Video, gr.File)):
                attach.change(chatbot._upload_fn,
                        [chatbot.textbox, attach],
                        [chatbot.textbox], queue=False, api_name=False)
        if 'chat_role' in attachments and 'system_prompt' in attachments:
            chat_role = attachments['chat_role']
            system_prompt = attachments['system_prompt']
            # NOTE(review): module-level PROMPTS is {} in this file, so any role
            # selection raises KeyError here — presumably PROMPTS should be
            # imported from prompts.py; confirm.
            chat_role.change(lambda x: PROMPTS[x]['prompt'].strip(), chat_role, system_prompt)
    return demo
|
173 |
+
|
174 |
+
def parse_args():
    """Parse command-line options for the demo server (port, debug flag)."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Multimodal Chatbot',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('-p', '--port', default=7860, type=int, help='port number.')
    parser.add_argument('--debug', action='store_true', help='debug mode.')
    return parser.parse_args()
|
189 |
+
|
190 |
+
|
191 |
+
if __name__ == '__main__':

    args = parse_args()

    # If the server already has a key configured, hide the API-key textbox so
    # users are not asked for one.
    if 'OPENAI_API_KEY' in os.environ:
        del ATTACHMENTS['openai_api_key']

    demo = get_demo()
    # Bind on all interfaces so the app is reachable from inside a container.
    demo.queue().launch(server_name='0.0.0.0', server_port=args.port)
|
assets/bot.png
ADDED
![]() |
assets/openai.png
ADDED
![]() |
assets/user.png
ADDED
![]() |
llms.py
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
################################################################
|
4 |
+
# Format LLM messages
|
5 |
+
################################################################
|
6 |
+
|
7 |
+
def _format_messages(history, message=None, system=None, format='plain',
|
8 |
+
user_name='user', bot_name='assistant'):
|
9 |
+
_history = []
|
10 |
+
if format == 'openai_chat':
|
11 |
+
if system:
|
12 |
+
_history.append({'role': 'system', 'content': system})
|
13 |
+
for human, ai in history:
|
14 |
+
if human:
|
15 |
+
_history.append({'role': 'user', 'content': human})
|
16 |
+
if ai:
|
17 |
+
_history.append({'role': 'assistant', 'content': ai})
|
18 |
+
if message:
|
19 |
+
_history.append({'role': 'user', 'content': message})
|
20 |
+
return _history
|
21 |
+
|
22 |
+
elif format == 'plain':
|
23 |
+
if system:
|
24 |
+
_history.append(system)
|
25 |
+
for human, ai in history:
|
26 |
+
if human:
|
27 |
+
_history.append(f'{user_name}: {human}')
|
28 |
+
if ai:
|
29 |
+
_history.append(f'{bot_name}: {ai}')
|
30 |
+
if message:
|
31 |
+
_history.append(f'{user_name}: {message}')
|
32 |
+
_history.append(f'{bot_name}: ')
|
33 |
+
return '\n'.join(_history)
|
34 |
+
|
35 |
+
else:
|
36 |
+
raise ValueError(f"Invalid messages to format: {format}")
|
37 |
+
|
38 |
+
class bcolors:
    """ANSI escape sequences for coloring terminal output.

    Concatenate a color code before text and ENDC after it to reset.
    """
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKCYAN = '\033[96m'     # bright cyan
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
|
48 |
+
|
49 |
+
def _print_messages(history, message, bot_message, system=None,
                    user_name='user', bot_name='assistant', format='plain', variant='primary', tag=None):
    """history is list of tuple [(user_msg, bot_msg), ...]

    Pretty-print one exchange: the formatted prompt in cyan, the bot reply in
    a variant-dependent color, and an optional ":: tag" suffix in yellow.
    """
    prompt = _format_messages(history, message, system=system, user_name=user_name,
                              bot_name=bot_name, format=format)
    color_by_variant = {
        'primary': bcolors.OKGREEN,
        'secondary': bcolors.HEADER,
        'warning': bcolors.WARNING,
        'error': bcolors.FAIL,
    }
    reply_color = color_by_variant.get(variant, bcolors.BOLD)
    suffix = '' if tag is None else f'\n:: {tag}'
    print(f'{bcolors.OKCYAN}{prompt}{reply_color}{bot_message}{bcolors.WARNING}{suffix}{bcolors.ENDC}')
|
57 |
+
|
58 |
+
|
59 |
+
################################################################
|
60 |
+
# LLM bot fn
|
61 |
+
################################################################
|
62 |
+
|
63 |
+
def _openai_bot_fn(message, history, **kwargs):
    """Single-shot (non-streaming) OpenAI chat completion; returns the reply text.

    Recognized kwargs: temperature (default 0), system_prompt, chat_engine
    (model name, default 'gpt-3.5-turbo'), verbose (echo the exchange via
    _print_messages).  Requires OPENAI_API_KEY in the environment.
    NOTE(review): uses the pre-1.0 `openai.ChatCompletion` API — confirm the
    installed openai package version is < 1.0.
    """
    _kwargs = dict(temperature=kwargs.get('temperature', 0))
    # treat an empty system prompt the same as no system prompt
    system = kwargs['system_prompt'] if 'system_prompt' in kwargs and kwargs['system_prompt'] else None
    chat_engine = kwargs.get('chat_engine', 'gpt-3.5-turbo')
    import openai
    openai.api_key = os.environ["OPENAI_API_KEY"]

    resp = openai.ChatCompletion.create(
        model=chat_engine,
        messages=_format_messages(history, message, system=system, format='openai_chat'),
        **_kwargs,
    )
    bot_message = resp.choices[0].message.content
    if 'verbose' in kwargs and kwargs['verbose']:
        _print_messages(history, message, bot_message, system=system, tag=f'openai ({chat_engine})')
    return bot_message
|
79 |
+
|
80 |
+
def _openai_stream_bot_fn(message, history, **kwargs):
    """Streaming OpenAI chat completion; generator yielding the reply so far.

    Same kwargs as _openai_bot_fn.  Each yield is the full accumulated
    message (not a delta), as gradio's ChatInterface expects.
    NOTE(review): uses the pre-1.0 `openai.ChatCompletion` API — confirm the
    installed openai package version is < 1.0.
    """
    _kwargs = dict(temperature=kwargs.get('temperature', 0))
    # treat an empty system prompt the same as no system prompt
    system = kwargs['system_prompt'] if 'system_prompt' in kwargs and kwargs['system_prompt'] else None
    chat_engine = kwargs.get('chat_engine', 'gpt-3.5-turbo')
    import openai
    openai.api_key = os.environ["OPENAI_API_KEY"]

    resp = openai.ChatCompletion.create(
        model=chat_engine,
        messages=_format_messages(history, message, system=system, format='openai_chat'),
        stream=True,
        **_kwargs,
    )

    bot_message = ""
    for _resp in resp:
        if 'content' in _resp.choices[0].delta: # last resp delta is empty
            bot_message += _resp.choices[0].delta.content # need to accumulate previous message
        yield bot_message.strip() # accumulated message can easily be postprocessed
    if 'verbose' in kwargs and kwargs['verbose']:
        _print_messages(history, message, bot_message, system=system, tag=f'openai_stream ({chat_engine})')
    # return value of a generator is only visible via StopIteration.value
    return bot_message
|
prompts.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
from datasets import load_dataset
|
3 |
+
|
4 |
+
dataset = load_dataset('fka/awesome-chatgpt-prompts')
|
5 |
+
|
6 |
+
def _to_command(display):
|
7 |
+
res = '_'.join(display.lower().replace('/', ' ').strip().split())
|
8 |
+
res = re.sub(r'[^A-Za-z0-9_]+', '', res)
|
9 |
+
return res
|
10 |
+
|
11 |
+
# Registry of role prompts, keyed by snake_case command ids (see _to_command).
PROMPTS = {}
PROMPTS["<none>"] = dict(act="<none>", prompt="") # always reset to <none> after a prompt
# A hand-written example entry in the same shape as the dataset records.
PROMPTS["english_translator"] = dict(act="English translator", prompt="""
I want you to act as an English translator. \
I will speak to you in any language and you will detect the language and translate it into English. \
I want you to only reply the translation and nothing else, do not write explanations. \
If the input is already in English, simplify reply with the original text. My first sentence is "Aloha!".
""")
# Merge in every record from the HF dataset loaded above (note: the dataset
# load is a network download performed at import time).
PROMPTS.update({_to_command(prompt['act']): prompt for prompt in dataset['train']})
|
20 |
+
|