# Streamlit app: GPT-2 blog post generator.
import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel

# Load the model and tokenizer once at module import.
# NOTE(review): Streamlit reruns the whole script on every user interaction,
# so wrapping this load in @st.cache_resource would avoid repeated reloads
# of the large checkpoint — confirm the target Streamlit version supports it.
model_name = 'gpt2-large'
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
def generate_blogpost(topic):
    """Generate a blog post continuation for *topic* using GPT-2.

    Args:
        topic: Prompt text the model will continue from.

    Returns:
        The decoded generated text (up to 500 tokens, special tokens removed).
    """
    # Tokenize once: calling the tokenizer directly yields both input_ids and
    # attention_mask, instead of encoding the same text twice as before.
    encoded = tokenizer(topic, return_tensors='pt')
    outputs = model.generate(
        encoded['input_ids'],
        attention_mask=encoded['attention_mask'],
        max_length=500,
        num_return_sequences=1,
        # GPT-2 has no pad token; reuse EOS to silence the generate() warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Streamlit UI: prompt the user for a topic and render the generated post.
st.title('Blog Post Generator')
topic = st.text_input('Enter a topic:')
# text_input returns "" until the user types something; only generate then.
if topic:
    blogpost = generate_blogpost(topic)
    st.write(blogpost)