Spaces:
Sleeping
Sleeping
File size: 1,087 Bytes
8da6985 5eebd83 8da6985 6a25254 5eebd83 5adef75 5eebd83 5adef75 8da6985 6a25254 5adef75 6a25254 5adef75 6a25254 5adef75 6a25254 f2d9be5 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
import streamlit as st
from transformers import GPT2Tokenizer, GPT2LMHeadModel
# Define model and tokenizer
model_name = 'gpt2-large'


@st.cache_resource
def _load_model_and_tokenizer(name):
    """Load the GPT-2 model and tokenizer exactly once.

    Streamlit re-executes this script on every user interaction; without
    caching, the large pretrained weights would be reloaded on each rerun.
    ``st.cache_resource`` memoizes the (model, tokenizer) pair for the
    lifetime of the server process.
    """
    return GPT2LMHeadModel.from_pretrained(name), GPT2Tokenizer.from_pretrained(name)


st.write("Loading model and tokenizer...")
# Module-level names preserved: generate_blogpost() below reads these globals.
model, tokenizer = _load_model_and_tokenizer(model_name)
st.write("Model and tokenizer loaded.")
def generate_blogpost(topic):
    """Generate blog-post text for *topic* with the module-level GPT-2 model.

    Parameters
    ----------
    topic : str
        Seed text used as the generation prompt.

    Returns
    -------
    str
        The generated text (the prompt is included in the output), or a
        string of the form ``"Error: ..."`` if generation fails.
    """
    try:
        # Tokenize once: a single tokenizer call returns both input_ids and
        # attention_mask. (The original encoded the topic twice — once via
        # encode() and again via encode_plus() — doing the same work twice.)
        encoded = tokenizer(topic, return_tensors='pt')
        outputs = model.generate(
            encoded['input_ids'],
            attention_mask=encoded['attention_mask'],
            max_length=500,
            num_return_sequences=1,
            # GPT-2 defines no pad token; reusing EOS is the standard way to
            # avoid the "Setting pad_token_id" warning during generation.
            pad_token_id=tokenizer.eos_token_id,
        )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app;
        # callers expect a string either way.
        return f"Error: {e}"
# --- Streamlit UI ---------------------------------------------------------
st.title('Blog Post Generator')

# text_input returns "" until the user types something; the truthiness
# check skips generation for an empty prompt.
topic = st.text_input('Enter a topic:')
if topic:
    st.write("Generating blog post...")
    st.write(generate_blogpost(topic))
|