# NOTE(review): the following lines are Hugging Face file-viewer chrome
# (uploader avatar, commit message/hash, "raw / history blame" links, file
# size) accidentally captured with the source. Commented out so the module
# is valid Python; they can be deleted entirely.
# twn39's picture
# update
# 95d0aed
# raw
# history blame
# 2.8 kB
import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage
from llm import DeepSeekLLM
from config import settings
# Module-level DeepSeek client config; `settings` supplies the API key
# (see config module, not visible here).
deep_seek_llm = DeepSeekLLM(api_key=settings.deep_seek_api_key)
# Default LangChain chat client pointed at the DeepSeek OpenAI-compatible
# endpoint. NOTE(review): this is built once with the default model and no
# temperature/max_tokens — the per-request settings chosen in the UI are not
# reflected here.
chat = ChatOpenAI(model=deep_seek_llm.default_model, api_key=deep_seek_llm.api_key, base_url=deep_seek_llm.base_url)
def predict(message, history, model: str, temperature: float, max_tokens: int):
    """Stream an assistant reply for *message*, given the chat *history*.

    Args:
        message: the MultimodalTextbox payload; only its ``text`` field is
            used (files are ignored — assumes the attribute access matches
            the installed Gradio version; TODO confirm, newer versions pass
            a dict accessed as ``message["text"]``).
        history: list of ``(human, assistant)`` message pairs from Gradio.
        model: model id selected in the UI dropdown.
        temperature: sampling temperature from the UI slider.
        max_tokens: completion token cap from the UI number input.

    Yields:
        The accumulated response text after each streamed chunk, so Gradio
        renders a progressively growing message.
    """
    # Bug fix: the original printed these settings but streamed through the
    # module-level `chat`, so the UI-selected model/temperature/max_tokens
    # had no effect. Build a per-request client that honors them.
    llm = ChatOpenAI(
        model=model or deep_seek_llm.default_model,  # dropdown may start unset
        api_key=deep_seek_llm.api_key,
        base_url=deep_seek_llm.base_url,
        temperature=temperature,
        max_tokens=max_tokens,
    )
    # Rebuild the LangChain message list from Gradio's pair-based history.
    history_messages = []
    for human, assistant in history:
        history_messages.append(HumanMessage(content=human))
        history_messages.append(AIMessage(content=assistant))
    history_messages.append(HumanMessage(content=message.text))
    response_message = ''
    for chunk in llm.stream(history_messages):
        response_message = response_message + chunk.content
        yield response_message
# Top-level UI: one chat tab wired to `predict`, one image tab (stub).
with gr.Blocks() as app:
    with gr.Tab('聊天'):
        with gr.Row():
            with gr.Column(scale=2, min_width=600):
                chatbot = gr.ChatInterface(
                    predict,
                    multimodal=True,
                    chatbot=gr.Chatbot(elem_id="chatbot", height=600),
                    textbox=gr.MultimodalTextbox(),
                    # These map positionally onto predict's extra parameters:
                    # model, temperature, max_tokens.
                    additional_inputs=[
                        gr.Dropdown(choices=deep_seek_llm.support_models, label='模型'),
                        gr.Slider(
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            label="Temperature",
                            key="temperature",
                        ),
                        gr.Number(
                            minimum=1024,
                            maximum=1024 * 20,
                            step=128,
                            value=4096,
                            label="Max Tokens",
                            key="max_tokens",
                        )
                    ],
                )
            with gr.Column(scale=1, min_width=300):
                with gr.Accordion('Select Model', open=True):
                    with gr.Column():
                        # NOTE(review): this Number is not wired to any event
                        # handler — it duplicates the Max Tokens input above
                        # and looks like leftover scaffolding; consider
                        # removing it.
                        gr.Number(
                            minimum=1024,
                            maximum=1024 * 20,
                            step=128,
                            value=4096,
                            label="Max Tokens",
                            # Bug fix: this previously reused key="max_tokens",
                            # colliding with the ChatInterface input — Gradio
                            # component keys must be unique within a Blocks.
                            key="max_tokens_sidebar",
                        )
    with gr.Tab('画图'):
        with gr.Row():
            with gr.Column(scale=2, min_width=600):
                gr.Image(label="Input Image")
            with gr.Column(scale=1, min_width=300):
                gr.Textbox(label="LoRA")

app.launch(debug=settings.debug)