from transformers import AlbertTokenizer, AlbertModel
from sklearn.metrics.pairwise import cosine_similarity

tokenizer = AlbertTokenizer.from_pretrained('albert-large-v2')
model = AlbertModel.from_pretrained('albert-large-v2')

a1 = "65 Mountain Blvd Ext, Warren, NJ 07059"
a2 = "112 Mountain Blvd Ext, Warren, NJ 07059"
a3 = "1677 NJ-27 #2, Edison, NJ 08817"
a4 = "5078 S Maryland Pkwy, Las Vegas, NV 89119"
a5 = "65 Mountain Boulevard Ext, Warren, NJ 07059"

def get_embedding(input_text):
    # Tokenize the address and report how it was split into subword tokens.
    encoded_input = tokenizer(input_text, return_tensors='pt')
    input_ids = encoded_input.input_ids
    input_num_tokens = input_ids.shape[1]
    print("Number of input tokens: " + str(input_num_tokens))
    print("Length of input: " + str(len(input_text)))
    list_of_tokens = tokenizer.convert_ids_to_tokens(input_ids.view(-1).tolist())
    print("Tokens : " + ' '.join(list_of_tokens))
    # Run the model and use the first ([CLS]) token's last hidden state
    # as the embedding for the whole address.
    output = model(**encoded_input)
    embedding = output.last_hidden_state[0][0]
    return embedding.tolist()

e1 = get_embedding(a1)
e2 = get_embedding(a2)
#e3 = get_embedding(a3)
e4 = get_embedding(a4)
e5 = get_embedding(a5)

print("a1 to a2")
print(cosine_similarity([e1], [e2]))
print("a1 to a4")
print(cosine_similarity([e1], [e4]))
print("a1 to a5")
print(cosine_similarity([e1], [e5]))

# with albert-base-v2
#a1 to a2
#[[0.99512167]]
#a1 to a4
#[[0.94850088]]
#a1 to a5
#[[0.99636901]]

# with albert-large-v2
#a1 to a2
#[[0.99682108]]
#a1 to a4
#[[0.94006972]]
#a1 to a5
#[[0.99503919]]
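
# A possible variant, not part of the original script: mean pooling over all
# token embeddings instead of taking only the [CLS] vector. Mean pooling is a
# common alternative for sentence-level similarity; the function name
# get_mean_embedding below is my own and reuses the tokenizer and model
# already loaded above. This is a sketch, not a drop-in replacement.
import torch

def get_mean_embedding(input_text):
    encoded_input = tokenizer(input_text, return_tensors='pt')
    with torch.no_grad():
        output = model(**encoded_input)
    # Average the last hidden states over the token dimension (batch size is 1).
    mean_embedding = output.last_hidden_state.mean(dim=1)[0]
    return mean_embedding.tolist()

# Example usage (same comparison pattern as above):
# m1 = get_mean_embedding(a1)
# m2 = get_mean_embedding(a2)
# print(cosine_similarity([m1], [m2]))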