Lauraayu committed on
Commit 10f3712 · verified · 1 Parent(s): 9a28330

Update app.py

Files changed (1)
  1. app.py +38 -26
app.py CHANGED
@@ -3,11 +3,17 @@ from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification
 import torch
 
 # Define the summarization pipeline
-summarizer_ntg = pipeline("text2text-generation", model="mrm8488/t5-base-finetuned-summarize-news")
+try:
+    summarizer_ntg = pipeline("text2text-generation", model="mrm8488/t5-base-finetuned-summarize-news")
+except Exception as e:
+    st.error(f"Error loading summarization model: {e}")
 
 # Load the tokenizer and model for classification
-tokenizer_bb = AutoTokenizer.from_pretrained("Lauraayu/News_Classi_Model")
-model_bb = AutoModelForSequenceClassification.from_pretrained("Lauraayu/News_Classi_Model")
+try:
+    tokenizer_bb = AutoTokenizer.from_pretrained("Lauraayu/News_Classi_Model")
+    model_bb = AutoModelForSequenceClassification.from_pretrained("Lauraayu/News_Classi_Model")
+except Exception as e:
+    st.error(f"Error loading classification model or tokenizer: {e}")
 
 # Streamlit application title
 st.title("News Article Summarizer and Classifier")
@@ -18,26 +24,32 @@ text = st.text_area("Enter the news article text here:")
 
 # Perform summarization and classification when the user clicks the "Classify" button
 if st.button("Classify"):
-    # Perform text summarization
-    summary = summarizer_ntg(text)[0]['summary_text']
-
-    # Tokenize the summarized text
-    inputs = tokenizer_bb(summary, return_tensors="pt", truncation=True, padding=True, max_length=512)
-
-    # Move inputs and model to the same device (GPU or CPU)
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    inputs = {k: v.to(device) for k, v in inputs.items()}
-    model_bb.to(device)
-
-    # Perform text classification
-    with torch.no_grad():
-        outputs = model_bb(**inputs)
-
-    # Get the predicted label
-    predicted_label_id = torch.argmax(outputs.logits, dim=-1).item()
-    label_mapping = model_bb.config.id2label
-    predicted_label = label_mapping[predicted_label_id]
-
-    # Display the summary and classification result
-    st.write("Summary:", summary)
-    st.write("Category:", predicted_label)
+    if not text:
+        st.error("Please enter some text to classify.")
+    else:
+        try:
+            # Perform text summarization
+            summary = summarizer_ntg(text)[0]['summary_text']
+
+            # Tokenize the summarized text
+            inputs = tokenizer_bb(summary, return_tensors="pt", truncation=True, padding=True, max_length=512)
+
+            # Move inputs and model to the same device (GPU or CPU)
+            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+            inputs = {k: v.to(device) for k, v in inputs.items()}
+            model_bb.to(device)
+
+            # Perform text classification
+            with torch.no_grad():
+                outputs = model_bb(**inputs)
+
+            # Get the predicted label
+            predicted_label_id = torch.argmax(outputs.logits, dim=-1).item()
+            label_mapping = model_bb.config.id2label
+            predicted_label = label_mapping[predicted_label_id]
+
+            # Display the summary and classification result
+            st.write("Summary:", summary)
+            st.write("Category:", predicted_label)
+        except Exception as e:
+            st.error(f"Error during summarization or classification: {e}")