waqasali1707 committed on
Commit
2650d25
·
verified ·
1 Parent(s): 3d39d76

Delete app.py

Browse files
Files changed (1) hide show
  1. app.py +0 -49
app.py DELETED
@@ -1,49 +0,0 @@
"""Streamlit app: upload a zipped seq2seq model (e.g. bart-base) and summarize text."""

import os
from zipfile import ZipFile

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Streamlit UI for uploading model
st.title("Text Summarizer")
uploaded_file = st.file_uploader("bart-base.zip", type="zip")

if uploaded_file is not None:
    # Extract the uploaded zip file into a fixed working directory.
    with ZipFile(uploaded_file, 'r') as zip_ref:
        zip_ref.extractall("model_directory")

    # Load the model from the extracted directory.
    model_path = "model_directory"
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
    except Exception as e:
        st.error(f"Failed to load model: {e}")
        # BUG FIX: the original fell through after a failed load and then hit a
        # NameError on `tokenizer`/`model` below; halt the script run instead.
        st.stop()
    st.success("Model loaded successfully!")

    # Text area for input
    text = st.text_area("Enter the text to generate its Summary:")

    # Configuration for generation (sampling with a moderate temperature).
    generation_config = {'max_length': 100, 'do_sample': True, 'temperature': 0.7}

    if text:
        try:
            # Encode input
            inputs_encoded = tokenizer(text, return_tensors='pt')

            # Generate output (no gradients needed for inference)
            with torch.no_grad():
                model_output = model.generate(inputs_encoded["input_ids"], **generation_config)[0]

            # Decode output
            output = tokenizer.decode(model_output, skip_special_tokens=True)

            # Display results
            with st.expander("Output", expanded=True):
                st.write(output)

        except Exception as e:
            st.error(f"An error occurred during summarization: {e}")