import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from zipfile import ZipFile

# Initialize Google Auth and Drive
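# NOTE (assumed setup): PyDrive's default OAuth flow reads client credentials from a
# client_secrets.json in the working directory. Streamlit reruns this script on every
# interaction, so in practice you may want to cache the authenticated client
# (e.g. with st.cache_resource) to avoid re-opening the browser window each rerun.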
gauth = GoogleAuth()
gauth.LocalWebserverAuth()  # Authenticates and opens a browser window
drive = GoogleDrive(gauth)

# Streamlit UI
st.title("Text Summarizer")

# Enter the file ID of your model.zip on Google Drive
model_file_id = st.text_input("Enter the Google Drive file ID of the model.zip")

if model_file_id and "model" not in st.session_state:
    try:
        # Download the zip from Google Drive to disk; GetContentString() would
        # corrupt the binary archive, so write it to a local file instead
        drive.CreateFile({'id': model_file_id}).GetContentFile("model.zip")

        # Extract the model files from the downloaded zip
        with ZipFile("model.zip", 'r') as zip_ref:
            zip_ref.extractall("model_directory")

        # Load tokenizer and model from the extracted directory and keep them
        # in session state so they survive Streamlit reruns
        model_path = "model_directory"
        st.session_state.tokenizer = AutoTokenizer.from_pretrained(model_path)
        st.session_state.model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
        st.success("Model loaded successfully!")
    except Exception as e:
        st.error(f"Failed to load model: {e}")

# Text area for input
text = st.text_area("Enter the text to generate its Summary:")

# Configuration for generation
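# max_length caps the summary at 100 tokens; do_sample with temperature=0.7 makes
# decoding stochastic, so repeated runs on the same text can return different summaries.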
generation_config = {'max_length': 100, 'do_sample': True, 'temperature': 0.7}

if text and "model" in st.session_state:
    tokenizer = st.session_state.tokenizer
    model = st.session_state.model
    try:
        # Encode input
        inputs_encoded = tokenizer(text, return_tensors='pt')

        # Generate output
        with torch.no_grad():
            model_output = model.generate(inputs_encoded["input_ids"], **generation_config)[0]

        # Decode output
        output = tokenizer.decode(model_output, skip_special_tokens=True)

        # Display results
        with st.expander("Output", expanded=True):
            st.write(output)

    except Exception as e:
        st.error(f"An error occurred during summarization: {e}")
elif text:
    st.warning("Load a model first by entering its Google Drive file ID above.")