meta-llama / app.py
richardkimsm89's picture
Update app.py
01655b3 verified
raw
history blame
404 Bytes
from transformers import pipeline
import gradio as gr
# Build the text-generation pipeline once at startup (model loading is
# expensive). Named `pipe` so it does not shadow the imported
# `transformers.pipeline` factory function.
pipe = pipeline(model="meta-llama/Llama-3.2-1B-Instruct")

def inference(input):
    """Run the LLM on the prompt and return only the generated text.

    The text-generation pipeline returns a list of dicts shaped like
    [{"generated_text": "..."}]; unwrap it so the Gradio Textbox shows
    plain text instead of the Python repr of the list.
    """
    output = pipe(input)
    return output[0]["generated_text"]
# Gradio UI: one text box in, one text box out, with a single example prompt.
demo_example = ["Hello, World."]

app = gr.Interface(
    fn=inference,
    inputs=[gr.Textbox(label="Input")],
    outputs=[gr.Textbox(label="Output")],
    title="Demo",
    examples=[demo_example],
)

# Start the web server (blocks until the app is shut down).
app.launch()