RandoML / app.py
acecalisto3's picture
Create app.py
0028c9d verified
raw
history blame
2.14 kB
from functools import lru_cache

import gradio as gr
from transformers import pipeline
@lru_cache(maxsize=4)
def _get_pipeline(task, model_name):
    """Build and cache a pipeline so repeated calls don't reload the model."""
    return pipeline(task, model=model_name)


def _format_result(result):
    """Normalize a pipeline's output into something a Textbox can display."""
    if isinstance(result, list) and result:
        first = result[0]
        if isinstance(first, dict) and 'generated_text' in first:
            return first['generated_text']
        return str(result)
    return result


def model_inference(model_name, task, input_data):
    """Run ``input_data`` through a Hugging Face pipeline.

    Args:
        model_name: Hub checkpoint identifier (e.g. ``"gpt2"``).
        task: transformers pipeline task string (e.g. ``"text-generation"``).
        input_data: Raw input text for the model.

    Returns:
        The generated/predicted output as display text, or an
        ``"An error occurred: ..."`` string if anything fails.
    """
    try:
        model_pipeline = _get_pipeline(task, model_name)
        # max_length is a generation-only kwarg: classification and token
        # pipelines reject it, so only pass it for text generation.
        if task == "text-generation":
            result = model_pipeline(input_data, max_length=100)
        else:
            result = model_pipeline(input_data)
        return _format_result(result)
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"An error occurred: {str(e)}"
def setup_interface():
    """Build the Gradio Blocks UI for the model playground.

    Returns:
        The constructed ``gr.Blocks`` app (not yet launched).
    """
    # Task label -> candidate model checkpoints on the Hub.
    models = {
        "Text Generation": ["gpt2", "EleutherAI/gpt-neo-2.7B"],
        "Text Classification": ["bert-base-uncased", "roberta-base"],
        "Token Classification": ["dbmdz/bert-large-cased-finetuned-conll03-english"],
    }
    # Task label -> transformers pipeline task string.
    tasks = {
        "Text Generation": "text-generation",
        "Text Classification": "text-classification",
        "Token Classification": "token-classification",
    }
    default_task = "Text Generation"

    with gr.Blocks() as demo:
        gr.Markdown("### Hugging Face Model Playground")
        with gr.Row():
            selected_task = gr.Dropdown(
                label="Select Task",
                choices=list(models.keys()),
                value=default_task,
            )
            model_name = gr.Dropdown(
                label="Select Model",
                choices=models[default_task],
                value=models[default_task][0],
            )
        input_data = gr.Textbox(label="Input", placeholder="Type here...")
        output = gr.Textbox(label="Output", placeholder="Results will appear here...")

        # Update the model dropdown based on task selection.
        def update_models(task):
            # gr.Dropdown.update() was removed in Gradio 4; gr.update() works
            # across versions. Also reset the value so a model from the
            # previous task can't remain selected.
            return gr.update(choices=models[task], value=models[task][0])

        selected_task.change(fn=update_models, inputs=selected_task, outputs=model_name)

        # Run model inference when the input text changes.
        def run_inference(model, task_label, text):
            # Translate the human-readable label into the pipeline task string.
            # (Previously the label itself, e.g. "Text Generation", was passed
            # to pipeline(), which is not a valid task name — the `tasks`
            # mapping was defined but never used.)
            return model_inference(model, tasks[task_label], text)

        input_data.change(
            fn=run_inference,
            inputs=[model_name, selected_task, input_data],
            outputs=output,
        )
    return demo
if __name__ == "__main__":
    # Build the playground UI and start the local Gradio server.
    setup_interface().launch()