Spaces:
Sleeping
Sleeping
File size: 1,524 Bytes
ea4634d af09235 ea4634d af09235 ea4634d af09235 ea4634d af09235 ea4634d af09235 ea4634d af09235 ea4634d af09235 ea4634d af09235 ea4634d af09235 ea4634d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 |
from pymongo import MongoClient
import pandas as pd
from transformers import pipeline
import streamlit as st
#### **1. MongoDB Connection**
def get_mongo_client():
    """Connect to MongoDB Atlas and return the ``tweets`` collection.

    NOTE: despite the name, this returns a Collection (``sentiment_db.tweets``),
    not a MongoClient. The name is kept for backward compatibility with callers.

    Returns:
        pymongo.collection.Collection: the ``tweets`` collection.
    """
    import os

    # SECURITY: the connection string (with password) was hard-coded here and
    # is now exposed in version control — rotate that password. Prefer the
    # MONGO_URI environment variable; fall back to the original URI so
    # existing deployments keep working unchanged.
    uri = os.environ.get(
        "MONGO_URI",
        "mongodb+srv://groupA:[email protected]/?retryWrites=true&w=majority&appName=SentimentCluster",
    )
    client = MongoClient(uri)
    db = client["sentiment_db"]
    return db["tweets"]
#### **1. MongoDB Connection**
collection = get_mongo_client()

#### **2. Load Dataset from Hugging Face**
@st.cache_data
def _load_dataset(url):
    """Download the CSV once per session.

    Streamlit re-runs this whole script on every widget interaction, so
    without caching the dataset would be re-fetched on each click.
    """
    return pd.read_csv(url)

csv_url = "https://huggingface.co/spaces/sharangrav24/SentimentAnalysis/resolve/main/sentiment140.csv"
df = _load_dataset(csv_url)

#### **3. Sentiment Analysis using BERT-ROBERTA**
@st.cache_resource
def _load_sentiment_pipeline():
    """Load the RoBERTa sentiment model once and reuse it across reruns."""
    return pipeline("sentiment-analysis", model="cardiffnlp/twitter-roberta-base-sentiment")

sentiment_pipeline = _load_sentiment_pipeline()
# Function to analyze sentiment
def analyze_sentiment(text):
    """Classify *text* and return the model's top sentiment label.

    Args:
        text: the text to classify; coerced to ``str`` so non-string values
            (e.g. NaN rows from pandas) do not crash the pipeline.

    Returns:
        str: the model's label for the highest-scoring class
        (this checkpoint emits LABEL_0/LABEL_1/LABEL_2 — TODO confirm
        against the deployed model card).
    """
    # truncation=True guards against inputs longer than the model's
    # 512-token limit, which would otherwise raise at inference time.
    return sentiment_pipeline(str(text), truncation=True)[0]['label']
# Score every row; runs the model once per tweet, so this is the slow step.
df["sentiment"] = df["text"].apply(analyze_sentiment)

#### **4. Upload Data to MongoDB**
# Replace any previously stored results so the collection mirrors `df`.
collection.delete_many({})  # Optional: Clear existing data before inserting
records = df.to_dict("records")
if records:  # insert_many raises InvalidOperation on an empty list
    collection.insert_many(records)
#### **5. Build Streamlit Dashboard**
st.title("Sentiment Analysis Dashboard")

# Preview: the first five documents persisted in MongoDB (drop the _id field
# so the table shows only the tweet data).
st.subheader("First 5 Rows from Database")
preview_docs = list(collection.find({}, {"_id": 0}).limit(5))
st.write(pd.DataFrame(preview_docs))

# On demand, render the full in-memory DataFrame...
if st.button("Show Complete Data"):
    st.write(df)

# ...or everything currently stored in MongoDB.
if st.button("Show MongoDB Data"):
    all_docs = list(collection.find({}, {"_id": 0}))
    st.write(pd.DataFrame(all_docs))
|