# Inference
#
# Minimal Gradio demo that proxies the hosted Hugging Face Inference API for
# meta-llama/Llama-3.2-1B-Instruct. `gr.load(..., src="models")` builds an
# Interface wired to the hosted model, and `.launch()` starts the local web UI.
# NOTE(review): the original file had lost its newlines, which turned the whole
# script into a single comment line; the intended structure is restored here.
import gradio as gr

app = gr.load(
    "meta-llama/Llama-3.2-1B-Instruct",
    src="models",  # load from the hosted-models source, not a Space
    inputs=[gr.Textbox(label="Input")],
    outputs=[gr.Textbox(label="Output")],
    title="Meta Llama",
    examples=[
        ["Hello, World."],
    ],
).launch()

# The alternative local-pipeline implementation below was deliberately kept by
# the author as a disabled (string-literal) variant; it runs the model locally
# via transformers instead of the hosted Inference API.
"""
# Pipeline
import gradio as gr
from transformers import pipeline

pipe = pipeline(model="meta-llama/Llama-3.2-1B-Instruct")

def fn(input):
    output = pipe(
        input,
    )
    return output[0]["generated_text"]#[len(input):]

app = gr.Interface(
    fn=fn,
    inputs=[gr.Textbox(label="Input")],
    outputs=[gr.Textbox(label="Output")],
    title="Meta Llama",
    examples=[
        ["Hello, World."],
    ],
).launch()
"""