import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, GenerationConfig
from pydrive2.auth import GoogleAuth
from pydrive2.drive import GoogleDrive

# Authenticate and create the PyDrive client.
# LocalWebserverAuth() starts a local webserver and handles the OAuth flow automatically.
gauth = GoogleAuth()
gauth.LocalWebserverAuth()
drive = GoogleDrive(gauth)

# Update this path to wherever the fine-tuned model is stored
# (here it points at the Google Drive mount used in Colab).
model_path = '/content/drive/My Drive/bart-base'

try:
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
except Exception as e:
    st.error(f"Failed to load model: {e}")
    st.stop()  # Halt the app here; everything below depends on the model.

# Streamlit UI
st.title("Text Summarizer")
text = st.text_area("Enter the text to generate its summary:")

# Configuration for generation
generation_config = GenerationConfig(max_new_tokens=100, do_sample=True, temperature=0.7)

if text:
    try:
        # Encode the input text into token IDs
        inputs_encoded = tokenizer(text, return_tensors='pt')

        # Generate the summary (no gradients needed for inference)
        with torch.no_grad():
            model_output = model.generate(
                inputs_encoded["input_ids"],
                generation_config=generation_config,
            )[0]

        # Decode the generated token IDs back into text
        output = tokenizer.decode(model_output, skip_special_tokens=True)

        # Display the result in an expandable box
        with st.expander("Output", expanded=True):
            st.write(output)
    except Exception as e:
        st.error(f"An error occurred during summarization: {e}")