import torch
import torch.nn as nn
import random


# Adjacency list of the toy graph: vertices 0-3 and 4-6 form two densely connected
# clusters, and vertex 7 is attached to vertices 1 and 3.
adj_list = [[1, 2, 3], [0, 2, 3], [0, 1, 3], [0, 1, 2], [5, 6], [4, 6], [4, 5], [1, 3]]
size_vertex = len(adj_list)  # number of vertices

# Hyperparameters (DeepWalk notation)
w = 3        # window size
d = 2        # embedding dimension
y = 200      # number of random-walk passes over all vertices
t = 6        # walk length
lr = 0.025   # learning rate

v = [0, 1, 2, 3, 4, 5, 6, 7]  # vertex labels


def RandomWalk(node, t):
    """Return a random walk of length t starting from the given node."""
    walk = [node]

    for i in range(t - 1):
        # step to a uniformly random neighbour of the current node
        node = adj_list[node][random.randint(0, len(adj_list[node]) - 1)]
        walk.append(node)

    return walk
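
# Illustrative sanity check (not part of the training procedure): sample one walk from vertex 0.
# The exact sequence varies because neighbours are chosen at random; e.g. RandomWalk(0, t)
# might return [0, 3, 1, 2, 0, 1].
# print(RandomWalk(0, t))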


class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        # phi: input embeddings, one d-dimensional row per vertex
        self.phi = nn.Parameter(torch.rand((size_vertex, d), requires_grad=True))
        # phi2: output projection from the hidden layer back to vertex scores
        self.phi2 = nn.Parameter(torch.rand((d, size_vertex), requires_grad=True))

    def forward(self, one_hot):
        hidden = torch.matmul(one_hot, self.phi)
        out = torch.matmul(hidden, self.phi2)
        return out


model = Model()


def skip_gram(wvi, w):
    for j in range(len(wvi)):
        for k in range(max(0, j - w), min(j + w, len(wvi))):
            # one-hot encoding of the centre vertex wvi[j]
            one_hot = torch.zeros(size_vertex)
            one_hot[wvi[j]] = 1

            out = model(one_hot)
            # negative log-softmax probability of the context vertex wvi[k]
            loss = torch.log(torch.sum(torch.exp(out))) - out[wvi[k]]
            loss.backward()

            # manual SGD step on both embedding matrices
            for param in model.parameters():
                param.data.sub_(lr * param.grad)
                param.grad.data.zero_()
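
# Side note (equivalent formulation, for reference): the loss above is the cross-entropy of the
# context vertex given the centre vertex, so something like
#     torch.nn.functional.cross_entropy(out.unsqueeze(0), torch.tensor([wvi[k]]))
# should produce the same value.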


# Training: each pass shuffles the vertices, starts one random walk from every vertex,
# and applies skip-gram updates over that walk.
for i in range(y):
    random.shuffle(v)
    for vi in v:
        wvi = RandomWalk(vi, t)
        skip_gram(wvi, w)

print(model.phi)  # learned d-dimensional vertex embeddings
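
# Since vertices 0-3 and 4-6 form separate clusters, their embedding rows would be expected to
# end up close together within a cluster and further apart across clusters.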


# Hierarchical-softmax variant: vertices are the leaves of a binary tree whose nodes are
# indexed so that node n has children 2n and 2n+1, with the root at node 1.
def func_L(w):
    """
    Parameters
    ----------
    w: Leaf node.

    Returns
    -------
    count: The length of the path from the root node to the given vertex.
    """
    count = 1
    while w != 1:
        count += 1
        w //= 2

    return count


def func_n(w, j):
    # Return the j-th node on the path from the root (node 1) down to leaf w.
    li = [w]
    while w != 1:
        w = w // 2
        li.append(w)

    li.reverse()

    return li[j]
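
# Worked example (illustrative): with size_vertex = 8, vertex 3 maps to leaf 8 + 3 = 11.
# Its root-to-leaf path is 1 -> 2 -> 5 -> 11, so func_L(11) == 4 and
# func_n(11, 0), ..., func_n(11, 3) give 1, 2, 5, 11.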


def sigmoid(x):
    out = 1 / (1 + torch.exp(-x))
    return out


class HierarchicalModel(torch.nn.Module):

    def __init__(self):
        super(HierarchicalModel, self).__init__()
        # vertex embeddings
        self.phi = nn.Parameter(torch.rand((size_vertex, d), requires_grad=True))
        # one vector per node of the binary tree built over the vertices
        self.prob_tensor = nn.Parameter(torch.rand((2*size_vertex, d), requires_grad=True))

    def forward(self, wi, wo):
        one_hot = torch.zeros(size_vertex)
        one_hot[wi] = 1
        w = size_vertex + wo                 # leaf index of the context vertex in the tree
        h = torch.matmul(one_hot, self.phi)  # embedding of the input vertex wi
        p = torch.tensor([1.0])
        # Walk from the root down to the leaf's parent, multiplying the per-branch probabilities;
        # starting at j = 0 includes the root branch, so the probabilities over all leaves sum to 1.
        for j in range(func_L(w) - 1):
            mult = -1
            if func_n(w, j + 1) == 2 * func_n(w, j):  # next node on the path is the left child
                mult = 1

            p = p * sigmoid(mult * torch.matmul(self.prob_tensor[func_n(w, j)], h))

        return p


hierarchicalModel = HierarchicalModel()
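
# For reference: under hierarchical softmax the probability of a context vertex factorises over
# its root-to-leaf path, P(wo | wi) = prod_j sigmoid(+/- v_{n(wo, j)} . h(wi)), with sign +1 when
# the path continues to the left child (node 2n) and -1 otherwise; forward() accumulates exactly
# these per-branch factors.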


def HierarchicalSkipGram(wvi, w):
    for j in range(len(wvi)):
        for k in range(max(0, j - w), min(j + w, len(wvi))):
            # probability that wvi[k] appears in the context of the centre vertex wvi[j]
            prob = hierarchicalModel(wvi[j], wvi[k])
            loss = -torch.log(prob)
            loss.backward()
            # manual SGD step
            for param in hierarchicalModel.parameters():
                param.data.sub_(lr * param.grad)
                param.grad.data.zero_()


for i in range(y):
    random.shuffle(v)
    for vi in v:
        wvi = RandomWalk(vi, t)
        HierarchicalSkipGram(wvi, w)


# Print, for every ordered pair (i, j), the model's probability (as a floored percentage)
# that vertex j appears in the context of vertex i.
for i in range(8):
    for j in range(8):
        print((hierarchicalModel(i, j).item() * 100) // 1, end=' ')
    print(end='\n')