import streamlit as st
from transformers import pipeline

# Summarization models available on the Hugging Face Hub.
available_models = [
    "t5-small",
    "t5-base",
    "t5-large",
    "google/pegasus-xsum",
    "google/pegasus-cnn_dailymail",
    "sshleifer/distilbart-cnn-12-6",
    "allenai/led-base-16384",
    "google/mt5-small",  # pretrained only; usually needs fine-tuning for good summaries
    "google/mt5-base",   # pretrained only; usually needs fine-tuning for good summaries
    # Add more models as needed
]

@st.cache_resource
def load_summarizer(model_name):
    """Loads the summarization pipeline for a given model from Hugging Face."""
    try:
        summarizer = pipeline("summarization", model=model_name)
        return summarizer
    except Exception as e:
        st.error(f"Error loading model {model_name}: {e}")
        return None

st.title("Hugging Face Text Summarization App")
text_to_summarize = st.text_area("Enter text to summarize:", height=300)
selected_model = st.selectbox("Choose a summarization model from Hugging Face:", available_models)
st.sidebar.header("Summarization Parameters")
max_length = st.sidebar.slider("Max Summary Length:", min_value=50, max_value=500, value=150)
min_length = st.sidebar.slider("Min Summary Length:", min_value=10, max_value=250, value=30)
temperature = st.sidebar.slider("Temperature (for sampling):", min_value=0.1, max_value=1.0, value=1.0, step=0.01, help="Higher values make the output more random; only used when sampling is enabled.")
repetition_penalty = st.sidebar.slider("Repetition Penalty:", min_value=1.0, max_value=2.5, value=1.0, step=0.01, help="Values above 1.0 penalize repeated tokens to reduce repetition.")
num_beams = st.sidebar.slider("Number of Beams (for beam search):", min_value=1, max_value=10, value=1, help="More beams improve quality but increase computation.")
do_sample = st.sidebar.checkbox("Enable Sampling?", value=False, help="Whether to use sampling; set to False for deterministic output.")
if st.button("Summarize"):
    if text_to_summarize:
        summarizer = load_summarizer(selected_model)
        if summarizer:
            with st.spinner(f"Summarizing using {selected_model}..."):
                try:
                    summary = summarizer(
                        text_to_summarize,
                        max_length=max_length,
                        min_length=min_length,
                        do_sample=do_sample,
                        # Only pass temperature when sampling; it is unused otherwise.
                        **({"temperature": temperature} if do_sample else {}),
                        repetition_penalty=repetition_penalty,
                        num_beams=num_beams if not do_sample else 1,  # Beam search is usually not combined with sampling.
                        early_stopping=True,
                    )[0]["summary_text"]
                    st.subheader("Summary:")
                    st.write(summary)
                except Exception as e:
                    st.error(f"Error during summarization: {e}")
        else:
            st.warning("Failed to load the selected model.")
    else:
        st.warning("Please enter some text to summarize.")
st.sidebar.header("About")
st.sidebar.info(
"This app uses the `transformers` library from Hugging Face "
"to perform text summarization. You can select from a variety of "
"pre-trained models available on the Hugging Face Model Hub. "
"Experiment with the parameters in the sidebar to control the "
"summarization process."
) |
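
# A minimal way to run this app locally (assuming the file is saved as app.py,
# the usual entry point for a Streamlit Space):
#
#     pip install streamlit transformers torch
#     streamlit run app.py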