from transformers import AlbertTokenizer, AlbertModel
from sklearn.metrics.pairwise import cosine_similarity
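# Compare a handful of US street addresses by encoding each one with ALBERT
# and measuring the cosine similarity between the resulting embeddings.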

# Use 'albert-base-v2' here; swap both names to 'albert-large-v2' to get
# the "with large" results listed at the bottom of the file.
tokenizer = AlbertTokenizer.from_pretrained('albert-base-v2')
model = AlbertModel.from_pretrained('albert-base-v2')

a1 = "65 Mountain Blvd Ext, Warren, NJ 07059"
a2 = "112 Mountain Blvd Ext, Warren, NJ 07059"
a3 = "1677 NJ-27 #2, Edison, NJ 08817"
a4 = "5078 S Maryland Pkwy, Las Vegas, NV 89119"
a5 = "65 Mountain Boulevard Ext, Warren, NJ 07059"

def get_embedding(input_text):
    encoded_input = tokenizer(input_text, return_tensors='pt')
    input_ids = encoded_input.input_ids
    input_num_tokens = input_ids.shape[1]

    print("Number of input tokens: " + str(input_num_tokens))
    print("Length of input: " + str(len(input_text)))

    list_of_tokens = tokenizer.convert_ids_to_tokens(input_ids.view(-1).tolist())
    print("Tokens: " + ' '.join(list_of_tokens))

    output = model(**encoded_input)

    # Use the final hidden state of the [CLS] token as the sentence embedding.
    embedding = output.last_hidden_state[0][0]
    return embedding.tolist()
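
# A possible variation (not part of the original script): mean-pool all token
# embeddings instead of taking only the [CLS] vector. Sketch only; it assumes
# the tokenizer's attention_mask marks the real (non-padding) tokens, and the
# helper name get_mean_pooled_embedding is hypothetical.
def get_mean_pooled_embedding(input_text):
    encoded_input = tokenizer(input_text, return_tensors='pt')
    output = model(**encoded_input)
    hidden = output.last_hidden_state[0]                   # (num_tokens, hidden_size)
    mask = encoded_input.attention_mask[0].unsqueeze(-1)   # (num_tokens, 1)
    # Average the hidden states of the masked-in tokens.
    return ((hidden * mask).sum(dim=0) / mask.sum()).tolist()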

e1 = get_embedding(a1)
e2 = get_embedding(a2)
#e3 = get_embedding(a3)
e4 = get_embedding(a4)
e5 = get_embedding(a5)

print(f"a1 {a1} to {a2} a2")
print(cosine_similarity([e1], [e2]))
print(f"a1 {a1} to {a4} a4")
print(cosine_similarity([e1], [e4]))
print(f"a1 {a1} to {a5} a5")
print(cosine_similarity([e1], [e5]))

# with base
#a1 to a2
#[[0.99512167]]
#a1 to a4
#[[0.94850088]]
#a1 to a5
#[[0.99636901]]

# with large
#a1 to a2
#[[0.99682108]]
#a1 to a4
#[[0.94006972]]
#a1 to a5
#[[0.99503919]]
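
# For both model sizes, the address on the same street (a1 vs a2) and the same
# address with "Boulevard" spelled out (a1 vs a5) score about 0.995, while the
# unrelated Las Vegas address (a1 vs a4) drops to roughly 0.94 to 0.95.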