CrystalMo committed (verified) · Commit 0a84bfb · 1 parent: 490f61c

Upload Mini_Project_1_Part_1.py

Files changed (1): Mini_Project_1_Part_1.py (+475 −0)
Mini_Project_1_Part_1.py ADDED
@@ -0,0 +1,475 @@
## Mini Project 1 - Part 1: Getting Familiar with Word Embeddings
# This assignment introduces students to text similarity measures using cosine similarity and sentence embeddings.
# Students will implement and compare different methods for computing and analyzing text similarity using GloVe and Sentence Transformers.

# Learning Objectives
# By the end of this assignment, students will:
# - Understand how cosine similarity is used to measure text similarity.
# - Learn to encode sentences using GloVe embeddings and Sentence Transformers.
# - Compare the performance of different embedding techniques.
# - Create a web interface for the model.

# Context: In this part, you are going to experiment with some commonly used pretrained text embeddings for text search.
# For example, GloVe is an unsupervised learning algorithm for obtaining vector representations for words. It is pretrained
# on 2 billion tweets with a vocabulary size of 1.2 million. Download from [Stanford NLP](http://nlp.stanford.edu/data/glove.twitter.27B.zip).
# Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. *GloVe: Global Vectors for Word Representation*.


### Import necessary libraries: this demo uses the streamlit library to run a text search app, so make sure it is installed.
import streamlit as st
import numpy as np
import numpy.linalg as la
import pickle
import os
import gdown
from sentence_transformers import SentenceTransformer
import matplotlib.pyplot as plt


### Some predefined utility functions for you to load the text embeddings

# Function to load GloVe embeddings
def load_glove_embeddings(glove_path="Data/embeddings.pkl"):
    with open(glove_path, "rb") as f:
        embeddings_dict = pickle.load(f, encoding="latin1")

    # A dictionary where the keys are words (or tokens) and the values are their corresponding GloVe embeddings
    return embeddings_dict

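# Illustrative usage (a sketch; assumes Data/embeddings.pkl exists locally):
# embeddings_dict = load_glove_embeddings()
# embeddings_dict["flower"]  # -> np.ndarray, e.g. shape (25,) for the 25d model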

def get_model_id_gdrive(model_type):
    if model_type == "25d":  # the dimension of the GloVe embeddings
        word_index_id = "13qMXs3-oB9C6kfSRMwbAtzda9xuAUtt8"  # Google Drive ID for the word index dictionary
        embeddings_id = "1-RXcfBvWyE-Av3ZHLcyJVsps0RYRRr_2"  # Google Drive ID for the embeddings file
    elif model_type == "50d":
        embeddings_id = "1DBaVpJsitQ1qxtUvV1Kz7ThDc3az16kZ"
        word_index_id = "1rB4ksHyHZ9skes-fJHMa2Z8J1Qa7awQ9"
    elif model_type == "100d":
        word_index_id = "1-oWV0LqG3fmrozRZ7WB1jzeTJHRUI3mq"
        embeddings_id = "1SRHfX130_6Znz7zbdfqboKosz-PfNvNp"
    else:
        # Fail loudly on unsupported model types instead of hitting an UnboundLocalError below
        raise ValueError("Unsupported model_type: " + str(model_type))

    return word_index_id, embeddings_id


def download_glove_embeddings_gdrive(model_type):
    # Get GloVe embeddings from Google Drive
    word_index_id, embeddings_id = get_model_id_gdrive(model_type)

    # Use gdown to fetch files from Google Drive
    embeddings_temp = "embeddings_" + str(model_type) + "_temp.npy"
    word_index_temp = "word_index_dict_" + str(model_type) + "_temp.pkl"

    # Download word_index pickle file
    print("Downloading word index dictionary....\n")
    gdown.download(id=word_index_id, output=word_index_temp, quiet=False)

    # Download embeddings numpy file
    print("Downloading embeddings...\n\n")
    gdown.download(id=embeddings_id, output=embeddings_temp, quiet=False)


# @st.cache_data()
def load_glove_embeddings_gdrive(model_type):
    word_index_temp = "word_index_dict_" + str(model_type) + "_temp.pkl"
    embeddings_temp = "embeddings_" + str(model_type) + "_temp.npy"

    # Load word index dictionary
    with open(word_index_temp, "rb") as f:
        word_index_dict = pickle.load(f, encoding="latin")

    # Load embeddings numpy array
    embeddings = np.load(embeddings_temp)

    return word_index_dict, embeddings

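# Illustrative flow (a sketch; the first run downloads the files from Google Drive):
# download_glove_embeddings_gdrive("50d")
# word_index_dict, embeddings = load_glove_embeddings_gdrive("50d")
# embeddings.shape  # -> (vocab_size, 50)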

@st.cache_resource()
def load_sentence_transformer_model(model_name):
    sentenceTransformer = SentenceTransformer(model_name)
    return sentenceTransformer


def get_sentence_transformer_embeddings(sentence, model_name="all-MiniLM-L6-v2"):
    """
    Get sentence transformer embeddings for a sentence
    """
    # 384 dimensional embedding
    # Default model: https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2

    sentenceTransformer = load_sentence_transformer_model(model_name)

    try:
        return sentenceTransformer.encode(sentence)
    except Exception:
        # Fall back to a zero vector of the expected dimensionality if encoding fails
        if model_name == "all-MiniLM-L6-v2":
            return np.zeros(384)
        else:
            return np.zeros(512)

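# Illustrative usage (a sketch; the first call downloads the model from Hugging Face):
# vec = get_sentence_transformer_embeddings("roses are red")
# vec.shape  # -> (384,) for all-MiniLM-L6-v2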

def get_glove_embeddings(word, word_index_dict, embeddings, model_type):
    """
    Get GloVe embedding for a single word
    """
    if word.lower() in word_index_dict:
        return embeddings[word_index_dict[word.lower()]]
    else:
        # Out-of-vocabulary words map to a zero vector of the model's dimensionality
        return np.zeros(int(model_type.split("d")[0]))

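# Illustrative usage (a sketch; assumes the 25d files have been loaded):
# get_glove_embeddings("rose", word_index_dict, embeddings, "25d").shape  # -> (25,)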

def get_category_embeddings(embeddings_metadata):
    """
    Get embeddings for each category
    1. Split categories into words
    2. Get embeddings for each word
    """
    model_name = embeddings_metadata["model_name"]
    # Initialize the per-model cache only once, so previously computed category embeddings are kept
    if "cat_embed_" + model_name not in st.session_state:
        st.session_state["cat_embed_" + model_name] = {}
    for category in st.session_state.categories.split(" "):
        if model_name:
            if category not in st.session_state["cat_embed_" + model_name]:
                st.session_state["cat_embed_" + model_name][category] = get_sentence_transformer_embeddings(
                    category, model_name=model_name)
        else:
            if category not in st.session_state["cat_embed_" + model_name]:
                st.session_state["cat_embed_" + model_name][category] = get_sentence_transformer_embeddings(category)


def update_category_embeddings(embeddings_metadata):
    """
    Update embeddings for each category
    """
    get_category_embeddings(embeddings_metadata)

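# The category cache lives in Streamlit session state, keyed per model, e.g. (a sketch):
# st.session_state["cat_embed_all-MiniLM-L6-v2"]  # -> {"Flowers": array([...]), "Colors": array([...]), ...}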

### Plotting utility functions

def plot_piechart(sorted_cosine_scores_items):
    sorted_cosine_scores = np.array(
        [
            sorted_cosine_scores_items[index][1]
            for index in range(len(sorted_cosine_scores_items))
        ]
    )
    categories = st.session_state.categories.split(" ")
    categories_sorted = [
        categories[sorted_cosine_scores_items[index][0]]
        for index in range(len(sorted_cosine_scores_items))
    ]
    fig, ax = plt.subplots()
    ax.pie(sorted_cosine_scores, labels=categories_sorted, autopct="%1.1f%%")
    st.pyplot(fig)  # Figure


def plot_piechart_helper(sorted_cosine_scores_items):
    sorted_cosine_scores = np.array(
        [
            sorted_cosine_scores_items[index][1]
            for index in range(len(sorted_cosine_scores_items))
        ]
    )
    categories = st.session_state.categories.split(" ")
    categories_sorted = [
        categories[sorted_cosine_scores_items[index][0]]
        for index in range(len(sorted_cosine_scores_items))
    ]
    fig, ax = plt.subplots(figsize=(3, 3))
    # Pull the top-scoring slice out of the pie for emphasis
    my_explode = np.zeros(len(categories_sorted))
    my_explode[0] = 0.2
    if len(categories_sorted) == 3:
        my_explode[1] = 0.1  # explode the runner-up slightly
    elif len(categories_sorted) > 3:
        my_explode[2] = 0.05
    ax.pie(
        sorted_cosine_scores,
        labels=categories_sorted,
        autopct="%1.1f%%",
        explode=my_explode,
    )

    return fig


def plot_piecharts(sorted_cosine_scores_models):
    scores_list = []
    categories = st.session_state.categories.split(" ")
    for model in sorted_cosine_scores_models:
        scores_list.append(sorted_cosine_scores_models[model])

    if len(sorted_cosine_scores_models) == 2:
        fig, (ax1, ax2) = plt.subplots(2)

        categories_sorted = [
            categories[scores_list[0][index][0]] for index in range(len(scores_list[0]))
        ]
        sorted_scores = np.array(
            [scores_list[0][index][1] for index in range(len(scores_list[0]))]
        )
        ax1.pie(sorted_scores, labels=categories_sorted, autopct="%1.1f%%")

        categories_sorted = [
            categories[scores_list[1][index][0]] for index in range(len(scores_list[1]))
        ]
        sorted_scores = np.array(
            [scores_list[1][index][1] for index in range(len(scores_list[1]))]
        )
        ax2.pie(sorted_scores, labels=categories_sorted, autopct="%1.1f%%")

        st.pyplot(fig)


def plot_alatirchart(sorted_cosine_scores_models):
    models = list(sorted_cosine_scores_models.keys())
    tabs = st.tabs(models)
    figs = {}
    for model in models:
        figs[model] = plot_piechart_helper(sorted_cosine_scores_models[model])

    for index in range(len(tabs)):
        with tabs[index]:
            st.pyplot(figs[models[index]])

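# Illustrative call (a sketch): one tab per model, each showing a pie chart of the scores
# plot_alatirchart({
#     "glove_50d": [(0, 2.3), (1, 1.4)],
#     "sentence_transformer_384": [(1, 2.1), (0, 1.6)],
# })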

### Your Part To Complete: follow the instructions in each function below to complete the similarity calculation between text embeddings

# Task I: Compute Cosine Similarity
def cosine_similarity(x, y):
    """
    Exponentiated cosine similarity
    1. Compute cosine similarity
    2. Exponentiate cosine similarity
    3. Return exponentiated cosine similarity
    (20 pts)
    """
    ##################################
    ### TODO: Add code here ##########
    ##################################

    # Ensure inputs are NumPy arrays
    x = np.array(x)
    y = np.array(y)

    # Compute dot product
    dot_product = np.dot(x, y)

    # Compute L2 norms of both vectors
    norm_x = np.linalg.norm(x)
    norm_y = np.linalg.norm(y)

    # Guard against zero vectors (e.g., an all-out-of-vocabulary input), which would
    # otherwise cause a division by zero; treat them as zero cosine similarity
    if norm_x == 0 or norm_y == 0:
        return np.exp(0.0)

    # Compute cosine similarity
    cosine_sim = dot_product / (norm_x * norm_y)

    # Exponentiate cosine similarity
    exp_cosine_sim = np.exp(cosine_sim)

    return exp_cosine_sim

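# Quick sanity checks (illustrative): identical vectors give exp(1), orthogonal vectors give exp(0) = 1
# assert np.isclose(cosine_similarity([1.0, 0.0], [1.0, 0.0]), np.e)
# assert np.isclose(cosine_similarity([1.0, 0.0], [0.0, 1.0]), 1.0)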

# Task II: Average GloVe Embedding Calculation
def averaged_glove_embeddings_gdrive(sentence, word_index_dict, embeddings, model_type="50d"):
    """
    Get averaged GloVe embeddings for a sentence
    1. Split sentence into words
    2. Get embeddings for each word
    3. Add embeddings for each word
    4. Divide by number of words
    5. Return averaged embeddings
    (30 pts)
    """
    embedding = np.zeros(int(model_type.split("d")[0]))
    ##################################
    ##### TODO: Add code here ########
    ##################################
    # Split sentence into words and convert to lowercase
    words = sentence.lower().split()

    # Track the number of valid words found in the embeddings
    valid_word_count = 0

    for word in words:
        if word in word_index_dict:  # Check if the word exists in the vocabulary
            index = word_index_dict[word]  # Get the word's index in embeddings
            embedding += embeddings[index]  # Sum the corresponding embedding vector
            valid_word_count += 1

    # Compute the average embedding if any valid words were found
    if valid_word_count > 0:
        embedding /= valid_word_count

    return embedding

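# Worked example (a sketch, assuming the 50d files are loaded): for "red rose",
# the result is (embeddings[word_index_dict["red"]] + embeddings[word_index_dict["rose"]]) / 2;
# unknown words are skipped, and an all-unknown sentence yields the zero vector.
# avg = averaged_glove_embeddings_gdrive("red rose", word_index_dict, embeddings, "50d")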

# Task III: Sort the cosine similarity
def get_sorted_cosine_similarity(text_search, embeddings_metadata):
    """
    Get sorted cosine similarity between the input sentence and the categories
    Steps:
    1. Get embeddings for the input sentence
    2. Get embeddings for the categories (if not found, update the category embeddings)
    3. Compute cosine similarity between the input sentence and the categories
    4. Sort the cosine similarities
    5. Return the sorted cosine similarities
    (50 pts)
    """
    categories = st.session_state.categories.split(" ")
    cosine_sim = {}
    if embeddings_metadata["embedding_model"] == "glove":
        word_index_dict = embeddings_metadata["word_index_dict"]
        embeddings = embeddings_metadata["embeddings"]
        model_type = embeddings_metadata["model_type"]

        input_embedding = averaged_glove_embeddings_gdrive(text_search,
                                                           word_index_dict,
                                                           embeddings, model_type)

        ##########################################
        ## TODO: Get embeddings for categories ###
        ##########################################
        for index, category in enumerate(categories):
            category_embedding = averaged_glove_embeddings_gdrive(
                category,
                word_index_dict,
                embeddings,
                model_type)
            cosine_sim[index] = cosine_similarity(input_embedding, category_embedding)

    else:
        model_name = embeddings_metadata["model_name"]
        if "cat_embed_" + model_name not in st.session_state:
            get_category_embeddings(embeddings_metadata)

        category_embeddings = st.session_state["cat_embed_" + model_name]

        print("text_search = ", text_search)
        if model_name:
            input_embedding = get_sentence_transformer_embeddings(text_search, model_name=model_name)
        else:
            input_embedding = get_sentence_transformer_embeddings(text_search)

        for index in range(len(categories)):
            ##########################################
            # TODO: Compute cosine similarity between input sentence and categories
            # TODO: Update category embeddings if category not found
            ##########################################
            category = categories[index]
            if category in category_embeddings:
                category_embedding = category_embeddings[category]
            else:
                # Category not cached yet: recompute the category embeddings, then look it up
                update_category_embeddings(embeddings_metadata)
                category_embedding = st.session_state["cat_embed_" + model_name][category]
            cosine_sim[index] = cosine_similarity(input_embedding, category_embedding)

    # Sort cosine similarities in descending order
    sorted_items = sorted(cosine_sim.items(), key=lambda x: x[1], reverse=True)

    return sorted_items

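# Illustrative output (a sketch): a list of (category_index, exponentiated_cosine_similarity)
# pairs sorted from most to least similar, e.g. [(0, 2.31), (3, 1.87), (1, 1.42), ...]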

### Below is the main script, which creates the demo app for the text search engine using the text embeddings.

if __name__ == "__main__":
    ### Text Search ###
    ### Bonus (10%): teams that submit a publicly accessible URL for their deployed web app earn extra marks.

    st.sidebar.title("GloVe Twitter")
    st.sidebar.markdown(
        """
        GloVe is an unsupervised learning algorithm for obtaining vector representations for words. It is pretrained on
        2 billion tweets with a vocabulary size of 1.2 million. Download from [Stanford NLP](http://nlp.stanford.edu/data/glove.twitter.27B.zip).

        Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. *GloVe: Global Vectors for Word Representation*.
        """
    )

    model_type = st.sidebar.selectbox("Choose the model", ("25d", "50d", "100d"), index=1)

    st.title("Search Based Retrieval Demo")
    st.subheader(
        "Pass in space separated categories you want this search demo to be about."
    )
    # st.selectbox(label="Pick the categories you want this search demo to be about...",
    #              options=("Flowers Colors Cars Weather Food", "Chocolate Milk", "Anger Joy Sad Frustration Worry Happiness", "Positive Negative"),
    #              key="categories"
    #              )
    st.text_input(
        label="Categories", key="categories", value="Flowers Colors Cars Weather Food"
    )
    print(st.session_state["categories"])
    print(type(st.session_state["categories"]))

    st.subheader("Pass in an input word or even a sentence")
    text_search = st.text_input(
        label="Input your sentence",
        key="text_search",
        value="Roses are red, trucks are blue, and Seattle is grey right now",
    )

    # Download GloVe embeddings if they don't exist locally
    embeddings_path = "embeddings_" + str(model_type) + "_temp.npy"
    word_index_dict_path = "word_index_dict_" + str(model_type) + "_temp.pkl"
    if not os.path.isfile(embeddings_path) or not os.path.isfile(word_index_dict_path):
        print("Model type = ", model_type)
        glove_path = "Data/glove_" + str(model_type) + ".pkl"
        print("glove_path = ", glove_path)

        # Download embeddings from Google Drive
        with st.spinner("Downloading glove embeddings..."):
            download_glove_embeddings_gdrive(model_type)

    # Load GloVe embeddings
    word_index_dict, embeddings = load_glove_embeddings_gdrive(model_type)

    # Find the closest categories to the input sentence
    if st.session_state.text_search:
        # GloVe embeddings
        print("Glove Embedding")
        embeddings_metadata = {
            "embedding_model": "glove",
            "word_index_dict": word_index_dict,
            "embeddings": embeddings,
            "model_type": model_type,
        }
        with st.spinner("Obtaining Cosine similarity for Glove..."):
            sorted_cosine_sim_glove = get_sorted_cosine_similarity(
                st.session_state.text_search, embeddings_metadata
            )

        # Sentence transformer embeddings
        print("Sentence Transformer Embedding")
        embeddings_metadata = {"embedding_model": "transformers", "model_name": ""}
        with st.spinner("Obtaining Cosine similarity for 384d sentence transformer..."):
            sorted_cosine_sim_transformer = get_sorted_cosine_similarity(
                st.session_state.text_search, embeddings_metadata
            )

        # Results and pie charts for both models
        print("Categories are: ", st.session_state.categories)
        st.subheader(
            "Closest category among: "
            + st.session_state.categories
            + " as per different embeddings"
        )

        print(sorted_cosine_sim_glove)
        print(sorted_cosine_sim_transformer)
        # print(sorted_distilbert)
        # Tabbed pie charts for all models
        plot_alatirchart(
            {
                "glove_" + str(model_type): sorted_cosine_sim_glove,
                "sentence_transformer_384": sorted_cosine_sim_transformer,
            }
        )
        # "distilbert_512": sorted_distilbert})

        st.write("")
        st.write(
            "Demo developed by Hongyan Liu and Yinxiu Wang (https://www.linkedin.com/in/your_id/ - Optional)"
        )
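
# To run this demo locally (a sketch; package names assumed from the imports above):
#   pip install streamlit gdown sentence-transformers matplotlib numpy
#   streamlit run Mini_Project_1_Part_1.py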