Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,49 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
from flask import Flask, request, render_template
import pandas as pd  # NOTE(review): imported but unused in this file — confirm before removing
import spacy
from transformers import pipeline

# Initialize Flask app
app = Flask(__name__)

# Load spaCy model for preprocessing.
# NOTE(review): requires `python -m spacy download en_core_web_sm` at install
# time; spacy.load raises OSError if the model package is missing.
nlp = spacy.load("en_core_web_sm")

# Load Hugging Face pipelines once at import time so each request does not
# pay the model-initialization cost. Both checkpoints are downloaded on
# first use and cached locally by the transformers library.
sentiment_pipeline = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
ner_pipeline = pipeline("ner", model="dbmdz/bert-large-cased-finetuned-conll03-english", aggregation_strategy="simple")
# Function to preprocess text
def preprocess_text(text):
    """Normalize *text* with spaCy for display/analysis.

    Drops stop words and punctuation, lemmatizes the remaining tokens,
    lowercases them, and returns them joined by single spaces.
    """
    doc = nlp(text)
    kept = []
    for token in doc:
        # Skip filler tokens; keep only content-bearing words.
        if token.is_stop or token.is_punct:
            continue
        kept.append(token.lemma_.lower())
    return ' '.join(kept)
@app.route('/')
def home():
    """Serve the landing page containing the comment-entry form."""
    return render_template('index.html')
@app.route('/analyze', methods=['POST'])
def analyze():
    """Analyze a submitted comment and render the results page.

    Reads the ``comments`` form field, runs sentiment analysis and named
    entity recognition, and renders ``result.html`` with a dict holding
    the original comment, the cleaned comment, the sentiment prediction,
    and the detected entities.
    """
    # .get() with a default avoids Flask raising 400 BadRequest when the
    # form field is absent (the route is POST-only, so no method check
    # is needed — the old `if request.method == 'POST'` implicitly
    # returned None for other methods, which would have been a 500).
    comments = request.form.get('comments', '')
    cleaned_comments = preprocess_text(comments)

    # Run both transformer pipelines on the RAW text, not the cleaned
    # text: the NER checkpoint is *cased*, so lowercasing/lemmatizing
    # destroys the capitalization cues entity recognition depends on,
    # and both models were fine-tuned on natural sentences (stop words
    # and punctuation included).
    sentiment_result = sentiment_pipeline(comments)[0]
    entities_result = ner_pipeline(comments)

    # Prepare results for rendering (keys unchanged for result.html).
    result = {
        'original_comment': comments,
        'cleaned_comment': cleaned_comments,
        'sentiment': sentiment_result,
        'entities': entities_result
    }

    return render_template('result.html', result=result)
if __name__ == '__main__':
    # Debug mode enables the Werkzeug interactive debugger, which allows
    # arbitrary code execution — never safe outside local development.
    # Default stays on (backward compatible) but can be disabled by
    # setting FLASK_DEBUG=0 in the environment.
    import os
    app.run(debug=os.environ.get('FLASK_DEBUG', '1') == '1')