import gradio as gr
from inference_fine_tune import generate_response  # generator-based inference code
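
# A minimal sketch of the generator contract that chat_interface assumes from
# inference_fine_tune.generate_response (hypothetical; the real module is not
# shown in this file): take a prompt string and yield incremental text chunks.
def _example_generate_response(prompt: str):
    # Placeholder chunks stand in for tokens streamed by the fine-tuned model.
    for chunk in ["This ", "is ", "a ", "streamed ", "reply."]:
        yield chunk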

def chat_interface(prompt):
    # generate_response is a generator; yield the growing reply so Gradio
    # streams it into the output Textbox (assumes each item is a text chunk).
    response = ""
    for chunk in generate_response(prompt):
        response += chunk
        yield response

with gr.Blocks() as demo:
    gr.Markdown("## Chat with the Model")
    with gr.Row():
        inp = gr.Textbox(label="Your Prompt", placeholder="Enter your message...", lines=3)
    out = gr.Textbox(label="Model Response", lines=10)

    btn = gr.Button("Send")
    # Streaming happens automatically because chat_interface is a generator:
    # each yielded value is rendered in the output Textbox step by step.
    btn.click(chat_interface, inputs=inp, outputs=out)

demo.launch(share=True)