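"""Streamlit text-summarization app.

Upload a zip archive containing a saved Hugging Face seq2seq model
(tokenizer and model files at the archive root), then paste text to
summarize it. Run with Streamlit, e.g. `streamlit run <this_file>.py`.
"""
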
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from zipfile import ZipFile

# Streamlit UI: page title and model-archive uploader
st.title("Text Summarizer")

uploaded_file = st.file_uploader("Upload the saved model.zip file", type="zip")

# Keep model state as None until an archive is uploaded and loaded
tokenizer = None
model = None

if uploaded_file is not None:
    # Extract the uploaded zip file
    with ZipFile(uploaded_file, 'r') as zip_ref:
        zip_ref.extractall("model_directory")

    # Load the tokenizer and model from the extracted directory
    try:
        model_path = "model_directory"
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
        st.success("Model loaded successfully!")
    except Exception as e:
        st.error(f"Failed to load model: {e}")

# Text area for input
text = st.text_area("Enter the text to generate its summary:")

# Generation settings: sampled decoding (temperature 0.7), capped at 100 tokens
generation_config = {'max_length': 100, 'do_sample': True, 'temperature': 0.7}

if text:
    # Summarization requires a successfully loaded model and tokenizer
    if model is None or tokenizer is None:
        st.warning("Please upload a model.zip file and let it load before summarizing.")
        st.stop()

    try:
        # Tokenize the input, truncating to the model's maximum input length
        inputs_encoded = tokenizer(text, return_tensors='pt', truncation=True)

        # Generate the summary; unpacking passes the attention mask along with
        # the input IDs, and no gradients are needed for inference
        with torch.no_grad():
            model_output = model.generate(**inputs_encoded, **generation_config)[0]

        # Decode the generated token IDs back to text
        output = tokenizer.decode(model_output, skip_special_tokens=True)

        # Display results
        with st.expander("Output", expanded=True):
            st.write(output)

    except Exception as e:
        st.error(f"An error occurred during summarization: {e}")
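
# Example: the uploaded archive can contain any Hugging Face seq2seq
# checkpoint saved with model.save_pretrained(...) and
# tokenizer.save_pretrained(...), zipped with the files at the archive root.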