# testsson / app.py
import gradio as gr
import torch
from transformers import pipeline

# Check if a GPU is available (device=0 is the first CUDA device, device=-1 is CPU)
device = 0 if torch.cuda.is_available() else -1
print("Using GPU" if device == 0 else "Using CPU")

# Load the text-generation pipeline on the GPU if available
generator = pipeline("text-generation", model="gpt2", device=device)

def generate_text(prompt):
    # max_length counts the prompt tokens plus the generated tokens; newer
    # transformers versions prefer max_new_tokens for counting only new text.
    return generator(prompt, max_length=50)[0]["generated_text"]

interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Enter your prompt here"),
    outputs=gr.Textbox(label="Generated Text"),
    title="AI Text Generator",
    description="This app generates text based on your input prompt. Try it out!",
    # Note: "dark" is not a built-in Gradio theme name, so theme="dark" is dropped;
    # dark mode can instead be forced by appending ?__theme=dark to the app URL.
)
if __name__ == "__main__":
    interface.launch()
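
# Usage notes (a sketch, not part of the original app):
# - Running `python app.py` serves the UI locally (default: http://127.0.0.1:7860);
#   a temporary public link can be requested with interface.launch(share=True).
# - The generation function can also be exercised directly, e.g.:
#       print(generate_text("Once upon a time"))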