# Text Summarization App — Streamlit demo (Hugging Face Spaces).
# Summarizes user text with a fine-tuned BART model, then filters the
# summary down to sentences containing action/importance keywords.
# Third-party dependencies: Streamlit for the UI, Hugging Face
# transformers for the summarization model.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


@st.cache_resource
def _load_model():
    """Load the tokenizer and model once; cached across Streamlit reruns.

    Streamlit re-executes the whole script on every interaction, so without
    caching the model weights would be re-downloaded/re-loaded each rerun.
    """
    tok = AutoTokenizer.from_pretrained("suriya7/bart-finetuned-text-summarization")
    mdl = AutoModelForSeq2SeqLM.from_pretrained("suriya7/bart-finetuned-text-summarization")
    return tok, mdl


# Module-level names kept for the rest of the script (generate_summary uses them).
tokenizer, model = _load_model()
def generate_summary(text):
    """Summarize *text* with the BART model and keep only key sentences.

    Args:
        text: Raw input text to summarize.

    Returns:
        A string containing only the summary sentences that mention
        change/importance keywords (see ``extract_important_points``).
    """
    # Tokenize, truncating to the model's 1024-token input limit.
    inputs = tokenizer([text], max_length=1024, return_tensors='pt', truncation=True)
    # Deterministic (greedy) decoding, capped at 100 newly generated tokens.
    summary_ids = model.generate(inputs['input_ids'], max_new_tokens=100, do_sample=False)
    summary = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
    # Post-process the summary to include only specific points.
    return extract_important_points(summary)
# Default terms that mark a sentence as actionable/important.
_DEFAULT_KEYWORDS = ("change", "request", "important", "needs", "must",
                     "critical", "required", "suggested")


def extract_important_points(summary, keywords=None):
    """Return only the sentences of *summary* that contain a keyword.

    This is a very basic keyword filter; it can be refined with more
    sophisticated NLP techniques.

    Args:
        summary: Text to filter, with sentences separated by ". ".
        keywords: Optional iterable of keywords to match (case-insensitive).
            Defaults to ``_DEFAULT_KEYWORDS``.

    Returns:
        The matching sentences re-joined with ". "; empty string if none match.
    """
    if keywords is None:
        keywords = _DEFAULT_KEYWORDS
    # Normalize keywords once, outside the per-sentence loop.
    lowered = [k.lower() for k in keywords]
    filtered_lines = [line for line in summary.split('. ')
                      if any(keyword in line.lower() for keyword in lowered)]
    return '. '.join(filtered_lines)
# ----- Streamlit interface -----
st.title("Text Summarization App")

# Free-form user text input.
user_input = st.text_area("Enter the text you want to summarize", height=200)

if st.button("Generate Summary"):
    if user_input:
        # Show a spinner while the model runs (can take a few seconds on CPU).
        with st.spinner("Generating summary..."):
            summary = generate_summary(user_input)
        st.subheader("Filtered Summary:")
        st.write(summary)
    else:
        st.warning("Please enter text to summarize.")

# Instructions for using the app.
st.write("Enter your text in the box above and click 'Generate Summary' to get a summarized version of your text.")