First Commit
Browse files- app.py +86 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import streamlit as st
|
2 |
+
import datetime
|
3 |
+
import pandas as pd
|
4 |
+
from pygooglenews import GoogleNews
|
5 |
+
from transformers import pipeline
|
6 |
+
import plotly.graph_objects as go
|
7 |
+
|
8 |
+
# Load the sentiment analysis model once at import time: the HF checkpoint
# "pramudyalyza/bert-indonesian-finetuned-news" is an Indonesian news
# sentiment classifier, and reloading it per request would be far too slow.
pipe = pipeline("text-classification", model="pramudyalyza/bert-indonesian-finetuned-news")
|
10 |
+
|
11 |
+
# Search Google News and run sentiment analysis over the recent headlines.
def process_keyword(keyword):
    """Search Google News (Indonesia) for *keyword* and score headline sentiment.

    Keeps only headlines published within the last 7 days, de-duplicates
    titles, and classifies each with the module-level ``pipe`` model.

    Parameters
    ----------
    keyword : str
        Search term passed to Google News.

    Returns
    -------
    tuple
        ``(positive_count, negative_count, total_count, df_clean)`` where
        ``df_clean`` is a DataFrame with columns ``title`` and ``sentiment``.
    """
    one_week_ago = datetime.datetime.now() - datetime.timedelta(days=7)
    gn = GoogleNews(lang='id', country='ID')
    search = gn.search(keyword)

    filtered_headlines = []
    for entry in search['entries']:
        # Google News RSS dates look like 'Mon, 01 Jan 2024 00:00:00 GMT'.
        # Skip entries whose timestamp is missing or malformed instead of
        # letting one bad feed item crash the whole request.
        try:
            published_date = datetime.datetime.strptime(
                entry['published'], '%a, %d %b %Y %H:%M:%S %Z')
        except (KeyError, ValueError):
            continue
        if published_date > one_week_ago:
            filtered_headlines.append(entry['title'])

    # .copy() ensures we own the frame before adding the 'sentiment' column;
    # mutating the drop_duplicates() result directly triggers pandas'
    # SettingWithCopyWarning (chained-assignment hazard).
    df_clean = (
        pd.DataFrame(filtered_headlines, columns=['title'])
        .drop_duplicates()
        .copy()
    )

    if df_clean.empty:
        # No recent headlines: return explicit zeros and a typed empty column
        # so downstream code sees a consistent schema.
        df_clean['sentiment'] = pd.Series(dtype=str)
        return 0, 0, 0, df_clean

    # One batched pipeline call is much faster than one call per row.
    df_clean['sentiment'] = [r['label'] for r in pipe(df_clean['title'].tolist())]

    positive_count = (df_clean['sentiment'] == 'Positive').sum()
    negative_count = (df_clean['sentiment'] == 'Negative').sum()
    total_count = len(df_clean)

    return positive_count, negative_count, total_count, df_clean
|
33 |
+
|
34 |
+
# ---------------------------------------------------------------------------
# Streamlit app layout (runs top-to-bottom on every interaction).
# ---------------------------------------------------------------------------
st.title("News Sentiment Analysis Dashboard")

keyword_input = st.text_input("Enter a keyword to search for news", placeholder="Type a keyword...")

if st.button("Analyze"):
    if keyword_input:
        positive_count, negative_count, total_count, df_clean = process_keyword(keyword_input)

        if total_count == 0:
            # Guard the empty case: gauges built with range [0, 0] are
            # meaningless, and there is nothing to tabulate or download.
            st.warning("No news articles found in the last 7 days for this keyword.")
        else:
            # Gauge: how many of the headlines were classified Positive.
            fig_positive = go.Figure(go.Indicator(
                mode="gauge+number",
                value=positive_count,
                title={'text': "Positive Sentiment"},
                gauge={'axis': {'range': [0, total_count]},
                       'bar': {'color': "green"}}
            ))

            # Gauge: how many of the headlines were classified Negative.
            fig_negative = go.Figure(go.Indicator(
                mode="gauge+number",
                value=negative_count,
                title={'text': "Negative Sentiment"},
                gauge={'axis': {'range': [0, total_count]},
                       'bar': {'color': "red"}}
            ))

            # Donut chart of the overall positive/negative split.
            fig_donut = go.Figure(go.Pie(
                labels=['Positive', 'Negative'],
                values=[positive_count, negative_count],
                hole=0.5,
                marker=dict(colors=['green', 'red'])
            ))
            fig_donut.update_layout(title_text='Sentiment Distribution')

            # Display results
            st.plotly_chart(fig_positive)
            st.plotly_chart(fig_negative)
            st.plotly_chart(fig_donut)
            st.write(f"News articles found: {total_count}")

            # Show the scored headlines.
            st.dataframe(df_clean)

            # Offer the scored headlines as a CSV download.
            csv = df_clean.to_csv(index=False).encode('utf-8')
            st.download_button(
                label="Download CSV",
                data=csv,
                file_name='news_sentiment_analysis.csv',
                mime='text/csv',
            )
    else:
        st.error("Please enter a keyword.")
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
pandas==2.2.3
|
2 |
+
plotly==5.24.1
|
3 |
+
pygooglenews==0.1.2
|
4 |
+
streamlit==1.37.1
|
5 |
+
transformers==4.36.2
|