import gradio as gr
from transformers import pipeline
from spaces import GPU  # Import the GPU decorator for ZeroGPU

# Decorate the function to indicate it needs GPU resources
@GPU
def generate_text(prompt):
    # Load the model inside the decorated function: on ZeroGPU, CUDA is only
    # available while a @GPU-decorated call is running (this does mean the
    # pipeline is reloaded on every request).
    model = pipeline("text-generation", model="EleutherAI/gpt-neo-125M", device=0)

    # do_sample=True is required for temperature/top_p to take effect
    output = model(prompt, max_length=150, do_sample=True, temperature=0.7, top_p=0.9)
    return output[0]["generated_text"]


# Create the Gradio interface
interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Enter your prompt here"),
    outputs=gr.Textbox(label="Generated Text"),
    title="AI Text Generator",
    description="This app generates text based on your input prompt. Try it out!",
    theme="dark"
)

if __name__ == "__main__":
    interface.launch()
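
# A minimal sketch (assuming the ZeroGPU `duration` keyword is available in the
# installed `spaces` package): if a single generation may need more than the
# default GPU window, the decorator can request a longer allocation, e.g.:
#
#     @GPU(duration=120)  # hold the GPU for up to 120 seconds per call
#     def generate_text(prompt):
#         ...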