import os
import streamlit as st
import google.generativeai as genai
from transformers import pipeline, AutoModelForSequenceClassification, AutoTokenizer
from keybert import KeyBERT  # Topic extraction
# Fetch API key from Hugging Face Secrets
GEMINI_API_KEY = os.getenv("gemini_api")
if GEMINI_API_KEY:
    genai.configure(api_key=GEMINI_API_KEY)
else:
    st.error("⚠️ Google API key is missing! Set it in Hugging Face Secrets.")
    st.stop()  # Halt the app run; nothing below works without the key
# Sentiment analysis model
MODEL_NAME = "cardiffnlp/twitter-roberta-base-sentiment"

# Load sentiment analysis pipeline
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    sentiment_pipeline = pipeline("sentiment-analysis", model=MODEL_NAME, tokenizer=tokenizer)
except Exception as e:
    st.error(f"❌ Error loading sentiment model: {e}")
    st.stop()  # Halt so sentiment_pipeline is never referenced while undefined

# Load KeyBERT for topic extraction
kw_model = KeyBERT()
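# Note (assumption, not in the original listing): KeyBERT() with no arguments falls back
# to its default sentence-transformers embedding model ("all-MiniLM-L6-v2"), which is
# downloaded the first time the Space runs.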
# Function to analyze sentiment
def analyze_sentiment(text):
    try:
        sentiment_result = sentiment_pipeline(text)[0]
        label = sentiment_result['label']  # Raw model label (LABEL_0 / LABEL_1 / LABEL_2)
        score = sentiment_result['score']  # Confidence score
        # Map raw labels to readable sentiment names
        sentiment_mapping = {
            "LABEL_0": "Negative",
            "LABEL_1": "Neutral",
            "LABEL_2": "Positive"
        }
        return sentiment_mapping.get(label, "Unknown"), score
    except Exception as e:
        return f"Error analyzing sentiment: {e}", None
# Function to extract key topics
def extract_topics(text, num_keywords=3):
    try:
        keywords = kw_model.extract_keywords(text, keyphrase_ngram_range=(1, 2), top_n=num_keywords)
        return [word[0] for word in keywords]  # Keep only the keyphrases, drop the scores
    except Exception as e:
        return [f"Error extracting topics: {e}"]
# Function to generate AI response, analyze sentiment, and extract topics
def chatbot_response(user_prompt):
    if not user_prompt:
        return None, None, None, None
    try:
        # AI response from Gemini
        model = genai.GenerativeModel("gemini-1.5-pro")
        ai_response = model.generate_content(user_prompt)

        # Sentiment analysis of the user prompt
        sentiment_label, confidence = analyze_sentiment(user_prompt)

        # Topic extraction from the user prompt
        topics = extract_topics(user_prompt)

        return ai_response.text, sentiment_label, confidence, topics
    except Exception as e:
        return f"❌ Error: {e}", None, None, None
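
# --- Minimal UI sketch (assumption) ---
# The Space's actual interface code is not part of this listing, so the widget labels
# and layout below are illustrative only: they show one way chatbot_response could be
# wired into Streamlit, not the author's original UI.
st.title("Gemini Chatbot with Sentiment & Topic Analysis")
user_input = st.text_area("Enter your message:")

if st.button("Send") and user_input:
    response, sentiment, confidence, topics = chatbot_response(user_input)
    st.subheader("AI Response")
    st.write(response)
    if sentiment is not None:
        st.subheader("Sentiment")
        if confidence is not None:
            st.write(f"{sentiment} (confidence: {confidence:.2f})")
        else:
            st.write(sentiment)
    if topics:
        st.subheader("Key Topics")
        st.write(", ".join(topics))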