import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, AIMessage
from llm import DeepSeekLLM
from config import settings

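# DeepSeekLLM and settings come from the local llm.py / config.py modules (not shown here);
# deep_seek_llm is assumed to expose the DeepSeek API key, base URL, default model and the
# list of supported models used by the dropdown below. The chat client itself is created
# per request inside predict(), so the model settings chosen in the UI are applied.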
deep_seek_llm = DeepSeekLLM(api_key=settings.deep_seek_api_key)


def predict(message, history, model: str, temperature: float, max_tokens: int):
    # Build the client per request so the model/temperature/max-tokens picked in the UI take effect.
    client = ChatOpenAI(
        model=model or deep_seek_llm.default_model,
        api_key=deep_seek_llm.api_key,
        base_url=deep_seek_llm.base_url,
        temperature=temperature,
        max_tokens=int(max_tokens),
    )
    # Rebuild the conversation as LangChain messages; only the text part of the
    # multimodal input is forwarded (attached files are ignored).
    history_messages = []
    for human, assistant in history:
        history_messages.append(HumanMessage(content=human))
        history_messages.append(AIMessage(content=assistant))
    history_messages.append(HumanMessage(content=message.text))

    # Stream the reply, yielding the accumulated text so the chatbot updates incrementally.
    response_message = ''
    for chunk in client.stream(history_messages):
        response_message = response_message + chunk.content
        yield response_message


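# UI layout: a "Chat" tab backed by predict(), plus a "Draw" tab that is a static placeholder.
# The additional_inputs of the ChatInterface are passed to predict() positionally after
# message and history, i.e. as model, temperature and max_tokens.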
with gr.Blocks() as app:
    with gr.Tab('Chat'):
        with gr.Row():
            with gr.Column(scale=2, min_width=600):
                chatbot = gr.ChatInterface(
                    predict,
                    multimodal=True,
                    chatbot=gr.Chatbot(elem_id="chatbot", height=600),
                    textbox=gr.MultimodalTextbox(),
                    additional_inputs=[
                        gr.Dropdown(choices=deep_seek_llm.support_models, label='Model'),
                        gr.Slider(
                            minimum=0.0,
                            maximum=1.0,
                            step=0.1,
                            label="Temperature",
                            key="temperature",
                        ),
                        gr.Number(
                            minimum=1024,
                            maximum=1024 * 20,
                            step=128,
                            value=4096,
                            label="Max Tokens",
                            key="max_tokens",
                        )
                    ],
                )
            with gr.Column(scale=1, min_width=300):
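                # Placeholder settings panel; these controls are not yet wired to predict().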
                with gr.Accordion('Select Model', open=True):
                    with gr.Column():
                        gr.Number(
                            minimum=1024,
                            maximum=1024 * 20,
                            step=128,
                            value=4096,
                            label="Max Tokens",
                            key="max_tokens",
                        )

    with gr.Tab('Draw'):
        with gr.Row():
            with gr.Column(scale=2, min_width=600):
                gr.Image(label="Input Image")
            with gr.Column(scale=1, min_width=300):
                gr.Textbox(label="LoRA")


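# Launch the Gradio server; the debug flag is taken from the config settings.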
app.launch(debug=settings.debug)