File size: 1,210 Bytes
967f284
 
3264612
86ef0b6
 
 
 
 
41dc826
3264612
 
 
 
 
 
 
 
 
 
 
86ef0b6
 
3264612
 
86ef0b6
3264612
86ef0b6
3264612
 
 
86ef0b6
3264612
 
86ef0b6
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr

# Custom CSS injected into the Blocks app below: paints the whole
# .gradio-container with a pastel pink-to-cyan horizontal gradient.
css = """
.gradio-container {
    background: linear-gradient(to right, #FFDEE9, #B5FFFC);
}
"""

# Load the Mistral-7B-Instruct-v0.3 model via Gradio's load function.
# NOTE(review): with a "models/..." path, gr.load presumably proxies the
# Hugging Face Inference API, so this needs network access (and possibly an
# HF token) at runtime — confirm in the deployment environment.
model = gr.load("models/mistralai/Mistral-7B-Instruct-v0.3")

def inference_fn(prompt):
    """Run the loaded Mistral model on *prompt* and return its reply.

    The object produced by ``gr.load(...)`` is callable, so invoking it
    directly with the prompt performs the inference call.
    """
    return model(prompt)

with gr.Blocks(css=css) as demo:
    # Centered page heading shown above the chat widgets.
    gr.Markdown("<h1 style='text-align: center;'>Bonjour Dans le chat du consentement</h1>")

    # Prompt entry and model response, laid out side by side.
    with gr.Row():
        prompt_box = gr.Textbox(label="Entrez votre message ici:", lines=3)
        answer_box = gr.Textbox(label="Réponse du Modèle Mistral-7B-Instruct:", lines=5)
    submit_btn = gr.Button("Envoyer")

    # On click, feed the prompt to inference_fn and display the result.
    submit_btn.click(fn=inference_fn, inputs=prompt_box, outputs=answer_box)

# Start the Gradio server when run as a script.
if __name__ == "__main__":
    demo.launch()