Create app.py
app.py
ADDED
@@ -0,0 +1,89 @@
import os

import praw
import gradio as gr
from transformers import TextClassificationPipeline, AutoModelForSequenceClassification, AutoTokenizer

# Reddit API credentials are read from the environment (e.g. Space secrets)
# rather than referenced as undefined names; the variable names used here are
# one possible choice.
client_id = os.getenv("REDDIT_CLIENT_ID")
client_secret = os.getenv("REDDIT_CLIENT_SECRET")
user_agent = os.getenv("REDDIT_USER_AGENT")

reddit = praw.Reddit(client_id=client_id,
                     client_secret=client_secret,
                     user_agent=user_agent)

# FinBERT: BERT fine-tuned for financial sentiment (positive / negative / neutral).
model_name = "ProsusAI/finbert"
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=3)
pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer, max_length=64, truncation=True, padding="max_length")
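# Note: each call to `pipe(...)` returns a list with one dict per input text,
# e.g. [{"label": "positive", "score": 0.93}] (score value illustrative).
# FinBERT's labels are "positive", "negative" and "neutral"; the helpers below
# add or subtract the confidence score depending on the predicted label.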
def reddit_analysis(subreddit_name, num_posts):
    """Score the newest posts of a subreddit and return the net sentiment plus the titles."""
    local_score = 0
    local_titles = []

    subreddit = reddit.subreddit(subreddit_name)
    for post in subreddit.new(limit=int(num_posts)):
        prediction = pipe(post.title)
        local_titles.append(post.title)
        # print(post.title)
        # print(post.selftext)

        # Add the confidence for positive titles, subtract it for negative ones;
        # neutral titles leave the score unchanged.
        if prediction[0]["label"] == "negative":
            local_score -= prediction[0]["score"]
        elif prediction[0]["label"] == "positive":
            local_score += prediction[0]["score"]

    titles_string = "\n".join(local_titles)

    return local_score, titles_string
# Running totals for the manual-analysis tab. Module-level state like this is
# shared by every visitor of the running app.
total_score = 0
text_list = []


def manual_analysis(text):
    """Classify a single text and update the running sentiment total."""
    global total_score

    prediction = pipe(text)
    text_list.append(text)

    if prediction[0]["label"] == "negative":
        total_score -= prediction[0]["score"]
    elif prediction[0]["label"] == "positive":
        total_score += prediction[0]["score"]

    return prediction, total_score
# Gradio UI: one tab for scoring individual texts, one for scoring the newest
# posts of a subreddit in bulk.
with gr.Blocks() as demo:
    with gr.Tab("Separate Analysis"):
        first_title = """<p><h1 align="center" style="font-size: 24px;">Analyse texts manually</h1></p>"""
        gr.HTML(first_title)
        with gr.Row():
            with gr.Column():
                text = gr.Textbox(label="text")
                analyse = gr.Button("Analyse")

            with gr.Column():
                label_score = gr.Textbox(label="Label/Score")
                average_score = gr.Textbox(label="Average Score")

        analyse.click(fn=manual_analysis, inputs=text, outputs=[label_score, average_score], api_name="Calc1")

    with gr.Tab("Mass Analysis"):
        second_title = """<p><h1 align="center" style="font-size: 24px;">Analyse latest posts from Reddit</h1></p>"""
        gr.HTML(second_title)
        with gr.Row():
            with gr.Column():
                subreddit_name = gr.Textbox(label="Subreddit Name")
                num_post = gr.Textbox(label="Number of Posts")
                analyse = gr.Button("Analyse")

            with gr.Column():
                average_score = gr.Textbox(label="Average Score")
                tifu_titles = gr.Textbox(label="Tifu Titles")

        analyse.click(fn=reddit_analysis, inputs=[subreddit_name, num_post], outputs=[average_score, tifu_titles], api_name="Calc2")

demo.launch()
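Because both click handlers register named endpoints (api_name="Calc1" and api_name="Calc2"), the running app can also be called programmatically. The lines below are a minimal sketch using gradio_client, not part of app.py; the Space identifier is a placeholder and the argument values are only examples.

from gradio_client import Client

# Placeholder Space id; replace with the actual "username/space-name".
client = Client("username/space-name")

# Single-text endpoint (the "Separate Analysis" tab): returns the raw
# prediction and the running total as strings.
prediction, running_total = client.predict("Stocks rallied after strong earnings", api_name="/Calc1")

# Bulk endpoint (the "Mass Analysis" tab): subreddit name and number of posts.
net_score, titles = client.predict("stocks", "10", api_name="/Calc2")
print(net_score)
print(titles)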