meta-llama / app.py
richardkimsm89's picture
Update app.py
427c52f verified
raw
history blame
517 Bytes
from transformers import pipeline
import gradio as gr
# Text-generation pipeline backed by the 1B-parameter Llama 3.2 instruct model.
# NOTE(review): no `device`/`torch_dtype` is passed, so this loads with library
# defaults (CPU unless transformers auto-detects otherwise) — confirm on the host.
pipe1 = pipeline(model = "meta-llama/Llama-3.2-1B-Instruct")
# Larger 3B variant kept for reference; deliberately disabled.
#pipe2 = pipeline(model = "meta-llama/Llama-3.2-3B-Instruct")
def inference(input):
    """Run the text-generation pipeline on *input* and return only the new text.

    The pipeline's ``generated_text`` echoes the prompt at the start, so the
    final slice drops ``len(input)`` characters to leave just the model's
    continuation.
    """
    result = pipe1(text_inputs=input)
    full_text = result[0]["generated_text"]
    return full_text[len(input):]
# Wire the inference function into a minimal Gradio UI and start serving.
prompt_box = gr.Textbox(label="Input")
completion_box = gr.Textbox(label="Output")
app = gr.Interface(
    fn=inference,
    inputs=[prompt_box],
    outputs=[completion_box],
    title="Demo",
    examples=[["Hello, Llama."]],
).launch()