# llama-mh / app.py
import gradio as gr
from transformers import pipeline

# Text-generation pipeline backed by the Llama 2 chat model.
pipe = pipeline("text-generation", model="huggingface-projects/llama-2-7b-chat")
def predict(text):
    # The pipeline returns a list of dicts; "generated_text" holds the model output.
    return pipe(text)[0]["generated_text"]
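
# A hedged variant (not part of the original file): the text-generation pipeline
# forwards standard generation kwargs to the model, so decoding could be tuned
# roughly as below. The parameter values are illustrative assumptions.
#
# def predict(text):
#     outputs = pipe(
#         text,
#         max_new_tokens=256,  # cap the length of the generated reply
#         do_sample=True,      # sample instead of greedy decoding
#         temperature=0.7,     # illustrative value, not from the original
#     )
#     return outputs[0]["generated_text"]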
demo = gr.Interface(
    fn=predict,
    inputs='text',
    outputs='text',
)
demo.launch()
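
# Usage sketch (an assumption, not part of the original file): running
# `python app.py` serves the Gradio interface locally, typically at
# http://127.0.0.1:7860; demo.launch(share=True) would also create a temporary
# public link. Loading Llama 2 chat weights generally requires accepting the
# model license on the Hugging Face Hub.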