import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Define model and tokenizer
model_name = 'gpt2-large'
st.write("Loading model and tokenizer...")
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
st.write("Model and tokenizer loaded.")
def generate_blogpost(topic):
    try:
        # A single tokenizer call returns both input_ids and attention_mask,
        # so there is no need to encode the topic twice.
        encoded = tokenizer(topic, return_tensors='pt')
        outputs = model.generate(
            encoded['input_ids'],
            attention_mask=encoded['attention_mask'],
            max_length=500,
            num_return_sequences=1,
            pad_token_id=tokenizer.eos_token_id,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"Error: {e}"
# Streamlit app
st.title('Blog Post Generator')
topic = st.text_input('Enter a topic:')
if topic:
    st.write("Generating blog post...")
    blogpost = generate_blogpost(topic)
    st.write(blogpost)
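# To try the app locally (assuming the script is saved as app.py and that
# streamlit, transformers, and torch are installed):
#
#   streamlit run app.py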