# app.py — minimal chat demo built on Gradio + transformers (distilgpt2)
# HuggingFace Space "llama" by hereoncollab (commit b6144f6, "Update app.py")
import gradio as gr
from transformers import pipeline
# Load the text-generation pipeline once at import time so every request
# reuses the same model instance. distilgpt2 is a small GPT-2 distillation,
# chosen here presumably for CPU-friendly inference — no GPU assumed.
pipe = pipeline("text-generation", model="distilgpt2")
def generate_response(user_input):
    """Generate a chat-style reply to *user_input* with the distilgpt2 pipeline.

    Parameters
    ----------
    user_input : str
        Raw message typed by the user.

    Returns
    -------
    str
        The model's continuation after the final "AI:" marker, whitespace-stripped.
        May be empty if the model produces nothing after the marker.
    """
    # Frame the prompt as a Human/AI exchange so the model continues in the
    # assistant role.
    formatted_input = f"Human: {user_input}\nAI:"
    # Fix: the original used max_length=150, which counts the *prompt* tokens
    # as well — long inputs silently shrank the reply toward zero (and prompts
    # of >= 150 tokens error out). max_new_tokens bounds only the generated
    # continuation, independent of prompt length.
    response = pipe(formatted_input, max_new_tokens=150, num_return_sequences=1)
    generated_text = response[0]['generated_text']
    # The pipeline echoes the prompt; keep only what follows the last "AI:".
    ai_response = generated_text.split('AI:')[-1].strip()
    return ai_response
# Wire the generator into a simple web UI: one multi-line textbox in,
# plain text out. Fixes the original's unprofessional input label
# ("shitty mesage:") and garbled title/description ("distel gppt 20").
interface = gr.Interface(
    fn=generate_response,
    inputs=gr.Textbox(
        label="Message:",
        lines=2,
        placeholder="Type your message here...",
    ),
    outputs="text",
    title="DistilGPT-2 Chat",
    description="A minimal chat demo powered by the distilgpt2 text-generation model.",
)

# Start the Gradio server (blocking call; default host/port).
interface.launch()