# Streamlit GPT-2 blog post generator
import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel
# Define model and tokenizer
# NOTE: gpt2-large (~3 GB) is downloaded on first run and loaded at import
# time, so app startup blocks until the weights are available.
model_name = 'gpt2-large'
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)
def generate_blogpost(topic):
    """Generate blog-post text continuing from *topic* with GPT-2.

    Args:
        topic: Seed string the model continues from.

    Returns:
        The decoded generation (up to 500 tokens total, including the
        prompt), with special tokens stripped.
    """
    # One tokenizer call produces both input_ids and attention_mask;
    # the original tokenized the topic twice (encode + encode_plus).
    encoded = tokenizer(topic, return_tensors='pt')
    outputs = model.generate(
        encoded['input_ids'],
        attention_mask=encoded['attention_mask'],
        max_length=500,
        num_return_sequences=1,
        # GPT-2 has no dedicated pad token; reuse EOS to silence the
        # generate() warning and pad correctly.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# --- Streamlit UI ---
st.title('Blog Post Generator')

user_topic = st.text_input('Enter a topic:')
# Only run generation once the user has typed something non-empty.
if user_topic:
    st.write(generate_blogpost(user_topic))