import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from zipfile import ZipFile
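
# Run the app with:  streamlit run app.py
# (assumes this script is saved as app.py; any filename works)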

st.title("Text Summarizer")
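
# The uploaded model.zip is expected to be a zipped save_pretrained()
# output directory (config, tokenizer files, and weights), e.g.:
#   model.save_pretrained("my_summarizer")
#   tokenizer.save_pretrained("my_summarizer")
# followed by zipping the "my_summarizer" folder ("my_summarizer" is
# only an example name).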

uploaded_file = st.file_uploader("Upload the saved model.zip file", type="zip")

# Defined up front so the summarization step below can check whether a
# model was actually loaded (avoids a NameError when nothing is uploaded).
model = None
tokenizer = None

if uploaded_file is not None:
    # Unpack the archive into a local directory that from_pretrained can read.
    with ZipFile(uploaded_file, 'r') as zip_ref:
        zip_ref.extractall("model_directory")

    try:
        model_path = "model_directory"
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
        model.eval()  # inference only: disables dropout
        st.success("Model loaded successfully!")
    except Exception as e:
        st.error(f"Failed to load model: {e}")
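
# Note: the model is reloaded on every Streamlit rerun. For larger models,
# one option (not shown here) is to wrap the loading step in a function
# decorated with st.cache_resource so it runs once per session.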

text = st.text_area("Enter the text to summarize:")
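
# Decoding settings for generate(): do_sample=True with temperature=0.7
# gives mildly varied summaries; do_sample=False would make the output
# deterministic (greedy). max_length is measured in tokens, not characters.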
generation_config = {'max_length': 100, 'do_sample': True, 'temperature': 0.7}

if text and model is not None and tokenizer is not None:
    try:
        # Truncate inputs that exceed the model's maximum input length.
        inputs_encoded = tokenizer(text, return_tensors='pt', truncation=True)

        # No gradients are needed at inference time.
        with torch.no_grad():
            model_output = model.generate(inputs_encoded["input_ids"], **generation_config)[0]

        output = tokenizer.decode(model_output, skip_special_tokens=True)

        with st.expander("Output", expanded=True):
            st.write(output)

    except Exception as e:
        st.error(f"An error occurred during summarization: {e}")
elif text:
    st.warning("Please upload a model before requesting a summary.")