|
import streamlit as st |
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, GenerationConfig |
|
|
|
|
|
# Hugging Face Hub model identifier ("<user>/<repo>"), NOT a download URL.
# `from_pretrained()` expects a repo id or a local path; a
# ".../resolve/main/" URL will fail to resolve. Replace the placeholder
# with your actual repo id.
model_url = 'your-username/your-model-name'


@st.cache_resource
def _load_summarizer(repo_id):
    """Load tokenizer and seq2seq model once; cached across Streamlit reruns.

    Raises whatever `from_pretrained` raises (e.g. OSError on a bad repo id),
    handled by the caller below.
    """
    tok = AutoTokenizer.from_pretrained(repo_id)
    mdl = AutoModelForSeq2SeqLM.from_pretrained(repo_id)
    return tok, mdl


try:
    tokenizer, model = _load_summarizer(model_url)
except Exception as e:
    st.error(f"Failed to load model: {e}")
    # Original code fell through after the error and later crashed with a
    # NameError on `tokenizer`/`model`; halt the script run instead.
    st.stop()
|
|
|
|
|
# --- UI: page header and the input widget for the text to summarize ---
st.title("Text Summarizer")
input_label = "Enter the text to generate its Summary:"
text = st.text_area(input_label)
|
|
|
|
|
# Decoding settings: sampling at moderate temperature, summary capped at
# 100 new tokens. Sampling makes output intentionally non-deterministic.
generation_config = GenerationConfig(max_new_tokens=100, do_sample=True, temperature=0.7)


if text:
    try:
        # truncation=True guards against inputs longer than the model's
        # maximum sequence length, which would otherwise raise or silently
        # garble generation.
        inputs_encoded = tokenizer(text, return_tensors='pt', truncation=True)

        # Inference only — disable autograd bookkeeping.
        with torch.no_grad():
            # Unpack the full tokenizer output so attention_mask is passed
            # along with input_ids (the original dropped it, which triggers
            # a transformers warning and can degrade generation).
            model_output = model.generate(**inputs_encoded, generation_config=generation_config)[0]

        # Decode the single generated sequence back to readable text.
        output = tokenizer.decode(model_output, skip_special_tokens=True)

        with st.expander("Output", expanded=True):
            st.write(output)

    except Exception as e:
        st.error(f"An error occurred during summarization: {e}")
|
|