import gradio as gr
import os
import openai
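# The OpenAI API key is expected in the "openai_key" environment variable.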
openai.api_key = os.getenv("openai_key")
# System prompt (in Chinese): answer as a friendly assistant, drawing on any natural-history /
# encyclopedia resources available; open with "Hello there, curious kid!" and close with
# "Did that answer your question? Feel free to keep asking!"; the audience is children and
# teenagers, so keep the content well-grounded, the tone fun and lively, and cite sources.
prompt = '请你调用你能获取的所有的博物百科资源库,用小助手的口吻来回答问题,开头第一句是你好啊,好奇宝宝!结尾是有没有解答你的疑惑呢?欢迎继续提问!由于是面向青少年儿童,请务必做到内容有理有据语言风格有趣生动,附上来源出处'
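# In-memory conversation history, keyed by user id (uid); each entry is a list of
# {"question", "answer", optional "rating"} dicts.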
history = {}
def chat(p, qid, uid, rating=None):
    """Answer question `p` for user `uid`, keeping per-user history (qid is accepted but unused)."""
    global history
    if uid in history:
        msgs = history[uid]
    else:
        msgs = []
    response = callapi(p, msgs)
    # An optional rating applies to the previous answer in this user's history.
    if rating is not None and len(msgs) > 0:
        msgs[-1]["rating"] = rating
    history[uid] = msgs + [{"question": p, "answer": response}]
    # First output is a fixed "text" type tag, second is the answer itself.
    return ["text", response]
def callapi(p, msgs):
    # Keep only the most recent 8 exchanges as context.
    if len(msgs) > 8:
        msgs = msgs[-8:]
    data = [{"role": "system", "content": prompt}]
    for m in msgs:
        data = data + [
            {"role": "user", "content": m["question"]},
            {"role": "assistant", "content": m["answer"]}
        ]
    data = data + [{"role": "user", "content": p}]
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=data
    )
    print(response)
    response = response["choices"][0]["message"]["content"]
    # Strip any leading blank lines from the model's reply.
    while response.startswith("\n"):
        response = response[1:]
    return response
# Gradio UI: question text, question id, user id, and an optional 1-5 rating slider ("评分" = rating).
iface = gr.Interface(fn=chat,
                     inputs=["text", "text", "text",
                             gr.inputs.Slider(minimum=1, maximum=5, step=1, default=None, label="评分")],
                     outputs=["text", "text"],
                     # Description (in Chinese): "This is a natural-history encyclopedia Q&A assistant
                     # for children and teenagers; we hope it satisfies your boundless imagination and curiosity!"
                     description="""这是一个面向青少年儿童的博物百科全书问答助手,希望能够让你天马行空的想象力和好奇心得到满足!
""")
iface.launch()
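# To run locally (a sketch; the file name is assumed, and the code above relies on the
# pre-1.0 openai SDK and an older Gradio release that still provides gr.inputs):
#   export openai_key=<your-api-key>
#   python app.py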