testsson / app.py
import gradio as gr
from transformers import pipeline
from spaces import GPU  # Import the GPU decorator for ZeroGPU

# Decorate the function so that ZeroGPU allocates a GPU only while it runs
@GPU
def generate_text(prompt):
    # Load the model inside the function so the GPU is only used when it is called.
    # "gpt2" is used here; there is no model on the Hugging Face Hub with the id "gpt3".
    model = pipeline("text-generation", model="gpt2", device=0)
    return model(prompt, max_length=50)[0]["generated_text"]
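
# Note (optional alternative, not part of the original file): the ZeroGPU docs
# recommend loading the model once at startup and decorating only the functions
# that actually run inference, which avoids reloading the weights on every
# request. A rough sketch of that pattern (exact device handling under ZeroGPU
# may differ):
#
#   generator = pipeline("text-generation", model="gpt2", device="cuda")
#
#   @GPU
#   def generate_text(prompt):
#       return generator(prompt, max_length=50)[0]["generated_text"]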

# Create the Gradio interface
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Enter your prompt here"),
    outputs=gr.Textbox(label="Generated Text"),
    title="AI Text Generator",
    description="This app generates text based on your input prompt. Try it out!",
    # Dark mode can be enabled by appending ?__theme=dark to the app URL.
)

if __name__ == "__main__":
    interface.launch()