import gradio as gr
import torch
from transformers import pipeline

# Use the first GPU if one is available; device -1 tells the pipeline to run on the CPU.
device = 0 if torch.cuda.is_available() else -1
print("Using GPU" if device == 0 else "Using CPU")

# Load a GPT-2 text-generation pipeline on the selected device.
model = pipeline("text-generation", model="gpt2", device=device)


def generate_text(prompt):
    # Generate up to 50 tokens (prompt included) and return the text of the first result.
    return model(prompt, max_length=50)[0]["generated_text"]

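
# Optional variant (illustrative sketch, not part of the original app): the pipeline also
# forwards standard generation kwargs such as do_sample, temperature, and top_p, so a
# sampling-based version could look like this. The name generate_text_sampled is hypothetical.
def generate_text_sampled(prompt):
    outputs = model(prompt, max_length=50, do_sample=True, temperature=0.7, top_p=0.9)
    return outputs[0]["generated_text"]
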
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Enter your prompt here"),
    outputs=gr.Textbox(label="Generated Text"),
    title="AI Text Generator",
    description="This app generates text based on your input prompt. Try it out!",
    # Note: "dark" is not a valid theme name in recent Gradio releases; dark mode is
    # applied by appending ?__theme=dark to the app URL rather than via the theme argument.
)

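
# Illustrative note (assumption, not part of the original script): launch() also accepts
# options such as share=True for a temporary public link or server_port to pin the port,
# e.g. interface.launch(share=True, server_port=7860).
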
if __name__ == "__main__":
    interface.launch()