import streamlit as st
from transformers import pipeline

# Assume you have fine-tuned models and their names are listed here
available_models = [
    "t5-small",
    "google/pegasus-xsum",
    "sshleifer/distilbart-cnn-12-6",
    "your_fine_tuned_news_model",      # Replace with your fine-tuned model name
    "your_fine_tuned_long_doc_model",  # Replace with another fine-tuned model name
    # Add more of your fine-tuned models here
]
@st.cache_resource  # Cache the pipeline so the model is not reloaded on every rerun
def load_summarizer(model_name):
    """Loads the summarization pipeline for a given model."""
    try:
        summarizer = pipeline("summarization", model=model_name)
        return summarizer
    except Exception as e:
        st.error(f"Error loading model {model_name}: {e}")
        return None

st.title("Advanced Text Summarization App")

text_to_summarize = st.text_area("Enter text to summarize:", height=300)
selected_model = st.selectbox("Choose a summarization model:", available_models)
# Parameters for controlling summarization
max_length = st.sidebar.slider("Max Summary Length:", min_value=50, max_value=500, value=150)
min_length = st.sidebar.slider("Min Summary Length:", min_value=10, max_value=250, value=30)
temperature = st.sidebar.slider("Temperature (for sampling):", min_value=0.1, max_value=1.0, value=1.0, step=0.01, help="Higher values make the output more random; must be positive when sampling is enabled.")
repetition_penalty = st.sidebar.slider("Repetition Penalty:", min_value=1.0, max_value=2.5, value=1.0, step=0.01, help="Penalizes repeated tokens to improve coherence.")
num_beams = st.sidebar.slider("Number of Beams (for beam search):", min_value=1, max_value=10, value=1, help="More beams improve quality but increase computation.")
do_sample = st.sidebar.checkbox("Enable Sampling?", value=False, help="Whether to use sampling; set to False for deterministic output.")

if st.button("Summarize"):
    if text_to_summarize:
        summarizer = load_summarizer(selected_model)
        if summarizer:
            with st.spinner(f"Summarizing using {selected_model}..."):
                try:
                    summary = summarizer(
                        text_to_summarize,
                        max_length=max_length,
                        min_length=min_length,
                        do_sample=do_sample,
                        # Temperature only applies when sampling; 1.0 is the neutral default otherwise
                        temperature=temperature if do_sample else 1.0,
                        repetition_penalty=repetition_penalty,
                        num_beams=num_beams if not do_sample else 1,  # Beam search is usually not used with sampling
                        early_stopping=True,
                    )[0]["summary_text"]
                    st.subheader("Summary:")
                    st.write(summary)
                except Exception as e:
                    st.error(f"Error during summarization: {e}")
        else:
            st.warning("Failed to load the selected model.")
    else:
        st.warning("Please enter some text to summarize.")

st.sidebar.header("About")
st.sidebar.info(
    "This app uses the `transformers` library from Hugging Face "
    "to perform text summarization. You can select from various "
    "pre-trained and potentially fine-tuned models. Experiment with "
    "the parameters in the sidebar to control the summarization process."
)
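
# A minimal way to try this app locally (a sketch, assuming the script is saved
# as app.py and Python with pip is available): install the dependencies, then
# launch the Streamlit server.
#
#   pip install streamlit transformers torch
#   streamlit run app.py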