MrGanesh committed on
Commit
19a3228
·
1 Parent(s): d3cfb6c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -18
app.py CHANGED
@@ -1,26 +1,81 @@
1
- import streamlit
2
- import pandas as pd
3
- #import torch
4
- from transformers import pipeline
5
  import streamlit as st
 
 
6
 
 
 
7
 
8
def app():
    """Streamlit page: summarize user-entered text with a pretrained model.

    Renders a title, a text area, and — once text is entered — runs a
    Hugging Face summarization pipeline and displays the raw JSON result.
    """
    st.title("Text Summarization 🤓")
    st.markdown("This is a Web application that Summarizes Text 😎")
    text = st.text_area('Enter Text')

    # BUG FIX: the original call was missing its closing parenthesis,
    # which made the whole file a SyntaxError.
    summarizer = pipeline("summarization", model="google/bigbird-pegasus-large-bigpatent")

    # Only summarize once the user has actually entered some text.
    if text:
        st.success("Summarizing Text, Please wait...")
        out = summarizer(text, min_length=100, max_length=400)
        st.json(out)


if __name__ == "__main__":
    app()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import torch
import streamlit as st
from transformers import BartTokenizer, BartForConditionalGeneration
from transformers import T5Tokenizer, T5ForConditionalGeneration

st.title('Text Summarization Demo')
st.markdown('Using BART and T5 transformer model')

# Let the user pick which pretrained summarizer to run.
model = st.selectbox('Select the model', ('BART', 'T5'))

# Per-model generation defaults; T5 is tuned here toward longer summaries.
if model == 'BART':
    _num_beams = 4
    _no_repeat_ngram_size = 3
    _length_penalty = 1
    _min_length = 12
    _max_length = 128
    _early_stopping = True
else:
    _num_beams = 4
    _no_repeat_ngram_size = 3
    _length_penalty = 2
    _min_length = 30
    _max_length = 200
    _early_stopping = True

# FIX: st.beta_columns was a deprecated beta API that has been removed from
# Streamlit; st.columns is the stable replacement with the same signature.
col1, col2, col3 = st.columns(3)
_num_beams = col1.number_input("num_beams", value=_num_beams)
_no_repeat_ngram_size = col2.number_input("no_repeat_ngram_size", value=_no_repeat_ngram_size)
_length_penalty = col3.number_input("length_penalty", value=_length_penalty)

col1, col2, col3 = st.columns(3)
_min_length = col1.number_input("min_length", value=_min_length)
_max_length = col2.number_input("max_length", value=_max_length)
# NOTE(review): early_stopping is a boolean; st.checkbox would be the natural
# widget (number_input may reject a bool value on recent Streamlit) — kept
# as-is to preserve the original behavior apart from the columns fix.
_early_stopping = col3.number_input("early_stopping", value=_early_stopping)

text = st.text_area('Text Input')
38
def run_model(input_text):
    """Summarize *input_text* with the model selected in the UI.

    Reads the selected ``model`` name and the generation settings
    (``_num_beams``, ``_no_repeat_ngram_size``, ``_length_penalty``,
    ``_min_length``, ``_max_length``, ``_early_stopping``) from
    module-level globals, then writes the first generated summary to the
    Streamlit page via st.success. Returns None.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    if model == "BART":
        bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
        bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
        # Collapse all runs of whitespace to single spaces before encoding.
        input_text = str(input_text)
        input_text = ' '.join(input_text.split())
        input_tokenized = bart_tokenizer.encode(input_text, return_tensors='pt').to(device)
        summary_ids = bart_model.generate(input_tokenized,
                                          num_beams=_num_beams,
                                          no_repeat_ngram_size=_no_repeat_ngram_size,
                                          length_penalty=_length_penalty,
                                          min_length=_min_length,
                                          max_length=_max_length,
                                          early_stopping=_early_stopping)
        # BUG FIX: the original read "for g insummary_ids" (missing space),
        # which is a SyntaxError.
        output = [bart_tokenizer.decode(g, skip_special_tokens=True,
                                        clean_up_tokenization_spaces=False)
                  for g in summary_ids]
        st.write('Summary')
        st.success(output[0])

    else:
        t5_model = T5ForConditionalGeneration.from_pretrained("t5-base")
        t5_tokenizer = T5Tokenizer.from_pretrained("t5-base")
        # Strip newlines and collapse whitespace before encoding.
        input_text = str(input_text).replace('\n', '')
        input_text = ' '.join(input_text.split())
        input_tokenized = t5_tokenizer.encode(input_text, return_tensors="pt").to(device)
        # Prepend T5's task prefix as pre-tokenized ids; 21603 and 10 are
        # presumably the t5-base ids for "summarize" and ":" — TODO confirm
        # against the tokenizer rather than hard-coding.
        summary_task = torch.tensor([[21603, 10]]).to(device)
        input_tokenized = torch.cat([summary_task, input_tokenized], dim=-1).to(device)
        summary_ids = t5_model.generate(input_tokenized,
                                        num_beams=_num_beams,
                                        no_repeat_ngram_size=_no_repeat_ngram_size,
                                        length_penalty=_length_penalty,
                                        min_length=_min_length,
                                        max_length=_max_length,
                                        early_stopping=_early_stopping)
        output = [t5_tokenizer.decode(g, skip_special_tokens=True,
                                      clean_up_tokenization_spaces=False)
                  for g in summary_ids]
        st.write('Summary')
        st.success(output[0])
# Kick off summarization for the entered text when the user presses Submit.
if st.button('Submit'):
    run_model(text)