File size: 1,231 Bytes
b04a659
3538aac
bf0e3ce
3538aac
1916db7
dbf90b5
bf0e3ce
dbf90b5
1916db7
 
60e3903
dbf90b5
60e3903
 
 
bf0e3ce
 
60e3903
 
 
bf0e3ce
 
60e3903
 
bf0e3ce
1916db7
dbf90b5
60e3903
bf0e3ce
 
 
 
 
60e3903
1916db7
60e3903
bf0e3ce
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import gradio as gr

# Model choices offered in the dropdown; first entry is the default selection.
MODELS = ["Mixtral-8x7B-Instruct-v0.1"]

def generate(message, chat_history, model, system_prompt):
    """Simulate a model response and append the exchange to the chat history.

    Args:
        message: The user's input text.
        chat_history: List of ``(user, assistant)`` tuples; may be ``None``
            on the first turn (Gradio can pass an unset component as None).
        model: Selected model name (unused — responses are simulated).
        system_prompt: System prompt text (unused — responses are simulated).

    Returns:
        tuple: ``(updated_history, "")`` — the second element clears the
        input textbox bound to this handler's outputs.
    """
    response = f"Simulated response for: {message}"

    # Copy instead of mutating the caller's list in place, and tolerate a
    # None/empty history so the very first submission cannot raise.
    history = list(chat_history) if chat_history else []
    history.append((message, response))

    return history, ""

DEFAULT_SYSTEM_PROMPT = """
You are a helpful assistant in normal conversation.
When given a problem to solve, you are an expert problem-solving assistant.
Your task is to provide a detailed, step-by-step solution to a given question.
"""

# Assemble the chat UI: model picker, editable system prompt, chat pane,
# message box, and a clear button.
with gr.Blocks() as demo:
    gr.Markdown("# Custom Chat Interface")

    with gr.Row():
        model = gr.Dropdown(label="Select Model", choices=MODELS, value=MODELS[0])

    system_prompt = gr.Textbox(label="System Prompt", value=DEFAULT_SYSTEM_PROMPT, lines=5)
    chatbot = gr.Chatbot(label="Chat")
    msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...")

    def _reset_conversation():
        """Empty the chat pane and the message textbox."""
        return [], ""

    clear_btn = gr.Button("Clear Chat")
    clear_btn.click(_reset_conversation, inputs=None, outputs=[chatbot, msg])

    # Pressing Enter in the textbox generates a reply and clears the input.
    msg.submit(generate, inputs=[msg, chatbot, model, system_prompt], outputs=[chatbot, msg])

demo.launch()