vortex123's picture
Update app.py
e72c8c4 verified
raw
history blame
648 Bytes
import gradio as gr
# Load the hosted Mixtral-8x7B-Instruct model from the Hugging Face Hub.
# The returned object is used as a callable below: model(prompt) -> text.
# NOTE(review): this performs a network call at import time — TODO confirm
# that is acceptable for this Space's startup path.
model = gr.load("models/mistralai/Mixtral-8x7B-Instruct-v0.1")
def chat(prompt, response_type):
    """Generate a model reply and clip it to the selected display length.

    Args:
        prompt: User text forwarded verbatim to the loaded model.
        response_type: One of "Short", "Medium" or "Long"; any other
            value falls back to the 200-character default (matching
            the original behavior for "Long").

    Returns:
        The model's response truncated to a number of *characters* —
        not tokens. The model still generates its full answer; only
        the returned text is shortened.
    """
    response = model(prompt)
    # The original called this limit `max_tokens`, but the slice below
    # operates on characters; renamed to avoid implying it is a
    # generation parameter passed to the model.
    char_limits = {"Short": 50, "Medium": 100}
    limit = char_limits.get(response_type, 200)
    return response[:limit]
# Assemble the web UI: a prompt box plus a length selector feeding `chat`,
# with the clipped model reply shown in a read-only textbox.
prompt_input = gr.Textbox(label="Prompt")
length_input = gr.Radio(["Short", "Medium", "Long"], label="Response Type")
reply_output = gr.Textbox(label="Response")

demo = gr.Interface(
    fn=chat,
    inputs=[prompt_input, length_input],
    outputs=reply_output,
    title="Mixtral-8x7B-Instruct-v0.1 Chat",
    description="Chat with the Mixtral-8x7B-Instruct-v0.1 model.",
)

demo.launch()