repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (1 class: py)
---|---|---|---|---|---|---
3SD
|
3SD-main/u2net_test.py
|
import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
# import torch.optim as optim
import numpy as np
from PIL import Image
import glob
from data_loader import RescaleT
from data_loader import ToTensor
from data_loader import ToTensorLab
from data_loader import SalObjDataset
from model import U2NET # full size version 173.6 MB
from model import U2NETP # small version u2net 4.7 MB
# normalize the predicted SOD probability map
def normPRED(d):
ma = torch.max(d)
mi = torch.min(d)
dn = (d-mi)/(ma-mi)
return dn
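# Usage sketch (hypothetical names): applied to one side output before saving,
# e.g. pred = normPRED(d1[:, 0, :, :]) min-max scales the raw map into [0, 1].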
def save_output(image_name,pred,d_dir):
predict = pred
predict = predict.squeeze()
predict_np = predict.cpu().data.numpy()
im = Image.fromarray((predict_np*255).astype(np.uint8)).convert('RGB')  # cast to uint8 so PIL receives an 8-bit image
img_name = image_name.split(os.sep)[-1]
image = io.imread(image_name)
imo = im.resize((image.shape[1],image.shape[0]),resample=Image.BILINEAR)
pb_np = np.array(imo)
imidx = os.path.splitext(img_name)[0]  # strip only the final extension, same as the original dot-join loop
imo.save(d_dir+imidx+'.png')
def main():
# --------- 1. get image path and name ---------
model_name = 'u2net'  # or 'u2netp'
test_datasets = ['ECSSD', 'PASCAL', 'DUTS_Test', 'HKU-IS', 'DUT', 'THUR']
for dataset in test_datasets:
image_dir = os.path.join(os.getcwd(), './../testing/', 'img',dataset)
prediction_dir = os.path.join(os.getcwd(), '../testing/','class_' + model_name + '_results' , dataset + os.sep)
model_dir = os.path.join(os.getcwd(), 'saved_models', 'trans_syn_' + model_name, model_name + '_bce_epoch_229_train.pth')
if not os.path.exists(prediction_dir):
os.makedirs(prediction_dir, exist_ok=True)  # makedirs also creates missing parent directories, unlike mkdir
img_name_list = list(glob.glob(image_dir + '/*'+'.jpg')) + list(glob.glob(image_dir + '/*'+'.png'))
#print(img_name_list)
# --------- 2. dataloader ---------
#1. dataloader
test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
lbl_name_list = [],
transform=transforms.Compose([RescaleT(320),
ToTensorLab(flag=0)])
)
test_salobj_dataloader = DataLoader(test_salobj_dataset,
batch_size=1,
shuffle=False,
num_workers=1)
# --------- 3. model define ---------
if(model_name=='u2net'):
print("...load U2NET---173.6 MB")
net = U2NET(3,1)
elif(model_name=='u2netp'):
print("...load U2NEP---4.7 MB")
net = U2NETP(3,1)
if torch.cuda.is_available():
net = torch.nn.DataParallel(net)
net.load_state_dict(torch.load(model_dir))
net.cuda()
else:
net.load_state_dict(torch.load(model_dir, map_location='cpu'))
net.eval()
# --------- 4. inference for each image ---------
for i_test, data_test in enumerate(test_salobj_dataloader):
print("inferencing:",img_name_list[i_test].split(os.sep)[-1])
inputs_test = data_test['image']
inputs_test = inputs_test.type(torch.FloatTensor)
if torch.cuda.is_available():
inputs_test = Variable(inputs_test.cuda())
else:
inputs_test = Variable(inputs_test)
with torch.no_grad():
d1,d2,d3,d4,d5,d6,d7= net(inputs_test)
# normalization
pred = d1[:,0,:,:]
pred = normPRED(pred)
# save results to test_results folder
if not os.path.exists(prediction_dir):
os.makedirs(prediction_dir, exist_ok=True)
save_output(img_name_list[i_test],pred,prediction_dir)
del d1,d2,d3,d4,d5,d6,d7
if __name__ == "__main__":
main()
| 4,238 | 32.642857 | 129 |
py
|
3SD
|
3SD-main/smoothness/__init__.py
|
import torch
import torch.nn.functional as F
# from torch.autograd import Variable
# import numpy as np
def laplacian_edge(img):
laplacian_filter = torch.Tensor([[-1, -1, -1], [-1, 8, -1], [-1, -1, -1]])
filter = torch.reshape(laplacian_filter, [1, 1, 3, 3])
filter = filter.to(img.device)  # keep the kernel on the same device as the input
lap_edge = F.conv2d(img, filter, stride=1, padding=1)
return lap_edge
def gradient_x(img):
sobel = torch.Tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
filter = torch.reshape(sobel,[1,1,3,3])
filter = filter.to(img.device)
gx = F.conv2d(img, filter, stride=1, padding=1)
return gx
def gradient_y(img):
sobel = torch.Tensor([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])
filter = torch.reshape(sobel, [1, 1,3,3])
filter = filter.to(img.device)
gy = F.conv2d(img, filter, stride=1, padding=1)
return gy
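# Charbonnier penalty below: a smooth, differentiable approximation of |s|,
# computed as sqrt(s^2 + eps^2) with eps = 0.001.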
def charbonnier_penalty(s):
cp_s = torch.pow(torch.pow(s, 2) + 0.001**2, 0.5)
return cp_s
def get_saliency_smoothness(pred, gt, size_average=True):
alpha = 10
s1 = 10
s2 = 1
## first order derivative: sobel
sal_x = torch.abs(gradient_x(pred))
sal_y = torch.abs(gradient_y(pred))
gt_x = gradient_x(gt)
gt_y = gradient_y(gt)
w_x = torch.exp(torch.abs(gt_x) * (-alpha))
w_y = torch.exp(torch.abs(gt_y) * (-alpha))
cps_x = charbonnier_penalty(sal_x * w_x)
cps_y = charbonnier_penalty(sal_y * w_y)
cps_xy = cps_x + cps_y
## second order derivative: laplacian
lap_sal = torch.abs(laplacian_edge(pred))
lap_gt = torch.abs(laplacian_edge(gt))
weight_lap = torch.exp(lap_gt * (-alpha))
weighted_lap = charbonnier_penalty(lap_sal*weight_lap)
smooth_loss = s1*torch.mean(cps_xy) + s2*torch.mean(weighted_lap)
return smooth_loss
class smoothness_loss(torch.nn.Module):
def __init__(self, size_average = True):
super(smoothness_loss, self).__init__()
self.size_average = size_average
def forward(self, pred, target):
return get_saliency_smoothness(pred, target, self.size_average)
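# Minimal usage sketch (assumes pred and gt are (B, 1, H, W) maps on the same
# device as the Sobel/Laplacian kernels above):
#   criterion = smoothness_loss()
#   loss = criterion(pred_saliency, ground_truth)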
| 2,014 | 30.484375 | 78 |
py
|
3SD
|
3SD-main/model/u2net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class REBNCONV(nn.Module):
def __init__(self,in_ch=3,out_ch=3,dirate=1):
super(REBNCONV,self).__init__()
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self,x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size as tensor 'tar'
def _upsample_like(src,tar):
src = F.interpolate(src,size=tar.shape[2:],mode='bilinear')  # F.upsample is deprecated; interpolate is the same op
return src
### RSU-7 ###
class RSU7(nn.Module):#UNet07DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU7,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
hx6dup = _upsample_like(hx6d,hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
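# Note: each RSU block is a small residual U-Net; the final `hx1d + hxin` sum is
# why the input projection (rebnconvin) already maps to out_ch channels, so both
# operands of the residual match in shape.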
### RSU-6 ###
class RSU6(nn.Module):#UNet06DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU6,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):#UNet05DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):#UNet04DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4F,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
return hx1d + hxin
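# RSU-4F keeps full resolution: instead of pooling, it grows the receptive field
# with dilation rates 1/2/4/8, so the decoder path needs no upsampling here.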
##### U^2-Net ####
class U2NET(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NET,self).__init__()
self.stage1 = RSU7(in_ch,32,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,32,128)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(128,64,256)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(256,128,512)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(512,256,512)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(512,256,512)
# decoder
self.stage5d = RSU4F(1024,256,512)
self.stage4d = RSU4(1024,128,256)
self.stage3d = RSU5(512,64,128)
self.stage2d = RSU6(256,32,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(128,out_ch,3,padding=1)
self.side4 = nn.Conv2d(256,out_ch,3,padding=1)
self.side5 = nn.Conv2d(512,out_ch,3,padding=1)
self.side6 = nn.Conv2d(512,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#-------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)  # torch.sigmoid replaces the deprecated F.sigmoid
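# Usage sketch (hypothetical sizes): d0 is the fused map normally kept at inference,
#   net = U2NET(3, 1)
#   d0, d1, d2, d3, d4, d5, d6 = net(torch.randn(1, 3, 320, 320))  # each (1, 1, 320, 320)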
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NETP,self).__init__()
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,64)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(64,16,64)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(64,16,64)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(64,16,64)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(64,16,64)
# decoder
self.stage5d = RSU4F(128,16,64)
self.stage4d = RSU4(128,16,64)
self.stage3d = RSU5(128,16,64)
self.stage2d = RSU6(128,16,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#decoder
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)
| 14,719 | 26.984791 | 118 |
py
|
3SD
|
3SD-main/model/u2net_transformer_pseudo_dino_final.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
from model.utils import trunc_normal_
import pdb
class REBNCONV(nn.Module):
def __init__(self,in_ch=3,out_ch=3,dirate=1):
super(REBNCONV,self).__init__()
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self,x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size as tensor 'tar'
def _upsample_like(src,tar):
src = F.interpolate(src,size=tar.shape[2:],mode='bilinear')  # F.upsample is deprecated; interpolate is the same op
return src
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
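# Dividing by keep_prob rescales the surviving samples so the layer's expected
# output matches its input, the standard stochastic-depth trick.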
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
#pdb.set_trace()
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
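# Standard scaled dot-product self-attention, softmax(q k^T / sqrt(head_dim)) v;
# the attention map is returned alongside x so callers such as
# get_last_selfattention can inspect it.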
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=400, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x)
#print(x.shape)
x = x.flatten(2).transpose(1, 2)
#print(self.num_patches)
return x
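# Output shape: (B, num_patches, embed_dim) with num_patches = (H//patch_size) * (W//patch_size).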
### RSU-7 ###
class RSU7(nn.Module):#UNet07DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU7,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
hx6dup = _upsample_like(hx6d,hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):#UNet06DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU6,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):#UNet05DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):#UNet04DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4F,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
return hx1d + hxin
class Edge_Dec(nn.Module):
def __init__(self, mid_ch=8, out_ch=1):
super(Edge_Dec,self).__init__()
self.side1 = nn.Conv2d(64, mid_ch, 3, padding=1)
self.side2 = nn.Conv2d(128, mid_ch, 3, padding=1)
self.side3 = nn.Conv2d(256, mid_ch, 3, padding=1)
self.side4 = nn.Conv2d(512, mid_ch, 3, padding=1)
self.side5 = nn.Conv2d(512, mid_ch, 3, padding=1)
self.side6 = nn.Conv2d(512, mid_ch, 3, padding=1)
self.outconv = nn.Conv2d(6 * mid_ch, out_ch, 1)
self.relu = nn.ReLU(inplace=True)
def forward(self,hx1,hx2,hx3,hx4,hx5,hx6):
d1 = self.relu(self.side1(hx1))
d2 = self.relu(self.side2(hx2))
d2 = _upsample_like(d2, d1)
d3 = self.relu(self.side3(hx3))
d3 = _upsample_like(d3, d1)
d4 = self.relu(self.side4(hx4))
d4 = _upsample_like(d4, d1)
d5 = self.relu(self.side5(hx5))
d5 = _upsample_like(d5, d1)
d6 = self.relu(self.side6(hx6))
d6 = _upsample_like(d6, d1)
d0 = torch.sigmoid(self.outconv(torch.cat((d1, d2, d3, d4, d5, d6), 1)))
return d0
class Class_Dec(nn.Module):
def __init__(self, input_ch, Num_classes):
super(Class_Dec,self).__init__()
self.fc_layer1 = nn.Conv2d(input_ch, Num_classes, kernel_size=1, stride=1,padding=0, bias=False)
self.fc_layer_bag = nn.Conv2d(input_ch, Num_classes, kernel_size=1, stride=1, padding=0, bias=False)
self.pool = nn.MaxPool2d(2, stride=2, ceil_mode=True)
def forward(self,class_input):
#print("class")
self.global_pool = F.interpolate(class_input, size=[1,1], mode='bilinear')
output = self.fc_layer1(self.global_pool)
B,C,H,W = output.shape
output = output.view(B,C*H*W)
cam_map = self.fc_layer1(class_input)
#bag output
bag_output = self.fc_layer_bag(class_input)
return output,cam_map,bag_output
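# Note: interpolating class_input down to 1x1 acts as a crude global pooling before
# the 1x1-conv classifier head; cam_map reuses the classifier weights at full
# resolution (a CAM-style map), while bag_output comes from a parallel 1x1 conv.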
##### U^2-Net ####
class U2NET(nn.Module):
def __init__(self,in_ch=3,out_ch=1,img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=384, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super(U2NET,self).__init__()
### Transformer encoder ###
self.image_size = img_size[0]
self.num_features = self.embed_dim = embed_dim
self.preprocess = RSU7(in_ch,16,8)
self.patch_size = patch_size
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=8, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
### U^2Net encoder branch for local task ###
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,128)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(128,32,256)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(256,64,512)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(512,128,512)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(512,128,512)
# decoder
self.stage5d = RSU4F(1408,256,512)
self.stage4d = RSU4(1024,128,256)
self.stage3d = RSU5(512,64,128)
self.stage2d = RSU6(256,32,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(65,out_ch,3,padding=1)
self.side2 = nn.Conv2d(65,out_ch,3,padding=1)
self.side3 = nn.Conv2d(129,out_ch,3,padding=1)
self.side4 = nn.Conv2d(257,out_ch,3,padding=1)
self.side5 = nn.Conv2d(513,out_ch,3,padding=1)
self.side6 = nn.Conv2d(513,out_ch,3,padding=1)
# edge decoder
self.edge_dec = Edge_Dec(8,out_ch)
self.outconv = nn.Conv2d(7*out_ch,out_ch,1)
# classification decoder
self.class_dec = Class_Dec(896,200)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
"""def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]"""
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def forward(self,x):
hx = x
#pdb.set_trace()
tx = F.interpolate(x,size=[self.image_size,self.image_size],mode='bilinear')
tx = self.prepare_tokens(self.preprocess(tx))
for blk in self.blocks:
tx = blk(tx)
tx = self.norm(tx)
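# The token sequence is folded back into a 2-D map below: after the transpose tx is
# (B, embed_dim, num_tokens), and the bilinear resize to C-1 rows effectively drops
# the extra [CLS] slot so the remaining patch tokens tile an
# (image_size/patch_size) x (image_size/patch_size) grid.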
tx = tx.transpose(1,2)
B, N, C = tx.shape
#print(B,N,C)
tx = F.interpolate(tx.reshape(B,N,C,1),size=[C-1,1],mode='bilinear')
tx = tx.reshape(B,N,self.image_size//self.patch_size ,self.image_size//self.patch_size )
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#-------------------- class decoder -------------------
txc = tx#F.upsample(tx, size=hx5.shape[2:], mode='bilinear')
class_input = torch.cat((F.interpolate(hx6, size=txc.shape[2:], mode='bilinear'),txc),1)
pred_class,cam_map,bag_output = self.class_dec(class_input)
#print(bag_output.shape,self.patch_size)
txd = F.interpolate(tx, size=hx5.shape[2:], mode='bilinear')
#-------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up,hx5,txd),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
edge = self.edge_dec(hx1,hx2,hx3,hx4,hx5,hx6)
d1 = self.side1(torch.cat((hx1d,edge),1))
d2 = self.side2(torch.cat((hx2d,_upsample_like(edge,hx2d)),1))
d2 = _upsample_like(d2,d1)
d3 = self.side3(torch.cat((hx3d,_upsample_like(edge,hx3d)),1))
d3 = _upsample_like(d3,d1)
d4 = self.side4(torch.cat((hx4d,_upsample_like(edge,hx4d)),1))
d4 = _upsample_like(d4,d1)
d5 = self.side5(torch.cat((hx5d,_upsample_like(edge,hx5d)),1))
d5 = _upsample_like(d5,d1)
d6 = self.side6(torch.cat((hx6,_upsample_like(edge,hx6)),1))
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6,edge),1))
return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6), edge, cam_map, bag_output, pred_class
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NETP,self).__init__()
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,64)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(64,16,64)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(64,16,64)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(64,16,64)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(64,16,64)
# decoder
self.stage5d = RSU4F(128,16,64)
self.stage4d = RSU4(128,16,64)
self.stage3d = RSU5(128,16,64)
self.stage2d = RSU6(128,16,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#decoder
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
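# forward returns only the [CLS] token embedding, the usual ViT/DINO global feature.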
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
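# The weight-normalized last layer with weight_g frozen at 1 (when norm_last_layer
# is True) follows the DINO recipe for stabilizing the projection head during training.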
| 33,498 | 32.499 | 155 |
py
|
3SD
|
3SD-main/model/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Misc functions.
Mostly copy-paste from torchvision references or other public repos like DETR:
https://github.com/facebookresearch/detr/blob/master/util/misc.py
"""
import os
import sys
import time
import math
import random
import argparse   # needed by bool_flag below
import warnings   # needed by _no_grad_trunc_normal_ below
import datetime
import subprocess
from collections import defaultdict, deque
import numpy as np
import torch
from torch import nn
import torch.distributed as dist
from PIL import ImageFilter, ImageOps
class GaussianBlur(object):
"""
Apply Gaussian Blur to the PIL image.
"""
def __init__(self, p=0.5, radius_min=0.1, radius_max=2.):
self.prob = p
self.radius_min = radius_min
self.radius_max = radius_max
def __call__(self, img):
do_it = random.random() <= self.prob
if not do_it:
return img
return img.filter(
ImageFilter.GaussianBlur(
radius=random.uniform(self.radius_min, self.radius_max)
)
)
class Solarization(object):
"""
Apply Solarization to the PIL image.
"""
def __init__(self, p):
self.p = p
def __call__(self, img):
if random.random() < self.p:
return ImageOps.solarize(img)
else:
return img
def load_pretrained_weights(model, pretrained_weights, checkpoint_key, model_name, patch_size):
if os.path.isfile(pretrained_weights):
state_dict = torch.load(pretrained_weights, map_location="cpu")
if checkpoint_key is not None and checkpoint_key in state_dict:
print(f"Take key {checkpoint_key} in provided checkpoint dict")
state_dict = state_dict[checkpoint_key]
# remove `module.` prefix
state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}
# remove `backbone.` prefix induced by multicrop wrapper
state_dict = {k.replace("backbone.", ""): v for k, v in state_dict.items()}
msg = model.load_state_dict(state_dict, strict=False)
print('Pretrained weights found at {} and loaded with msg: {}'.format(pretrained_weights, msg))
else:
print("Please use the `--pretrained_weights` argument to indicate the path of the checkpoint to evaluate.")
url = None
if model_name == "vit_small" and patch_size == 16:
url = "dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
elif model_name == "vit_small" and patch_size == 8:
url = "dino_deitsmall8_pretrain/dino_deitsmall8_pretrain.pth"
elif model_name == "vit_base" and patch_size == 16:
url = "dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
elif model_name == "vit_base" and patch_size == 8:
url = "dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
if url is not None:
print("Since no pretrained weights have been provided, we load the reference pretrained DINO weights.")
state_dict = torch.hub.load_state_dict_from_url(url="https://dl.fbaipublicfiles.com/dino/" + url)
model.load_state_dict(state_dict, strict=True)
else:
print("There is no reference weights available for this model => We use random weights.")
def clip_gradients(model, clip):
norms = []
for name, p in model.named_parameters():
if p.grad is not None:
param_norm = p.grad.data.norm(2)
norms.append(param_norm.item())
clip_coef = clip / (param_norm + 1e-6)
if clip_coef < 1:
p.grad.data.mul_(clip_coef)
return norms
def cancel_gradients_last_layer(epoch, model, freeze_last_layer):
if epoch >= freeze_last_layer:
return
for n, p in model.named_parameters():
if "last_layer" in n:
p.grad = None
def restart_from_checkpoint(ckp_path, run_variables=None, **kwargs):
"""
Re-start from checkpoint
"""
if not os.path.isfile(ckp_path):
return
print("Found checkpoint at {}".format(ckp_path))
# open checkpoint file
checkpoint = torch.load(ckp_path, map_location="cpu")
# key is what to look for in the checkpoint file
# value is the object to load
# example: {'state_dict': model}
for key, value in kwargs.items():
if key in checkpoint and value is not None:
try:
msg = value.load_state_dict(checkpoint[key], strict=False)
print("=> loaded '{}' from checkpoint '{}' with msg {}".format(key, ckp_path, msg))
except TypeError:
try:
msg = value.load_state_dict(checkpoint[key])
print("=> loaded '{}' from checkpoint: '{}'".format(key, ckp_path))
except ValueError:
print("=> failed to load '{}' from checkpoint: '{}'".format(key, ckp_path))
else:
print("=> key '{}' not found in checkpoint: '{}'".format(key, ckp_path))
# re load variable important for the run
if run_variables is not None:
for var_name in run_variables:
if var_name in checkpoint:
run_variables[var_name] = checkpoint[var_name]
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
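# Usage sketch (hypothetical values): one entry per training iteration, linear
# warmup followed by cosine decay:
#   lr_schedule = cosine_scheduler(5e-4, 1e-6, epochs=100, niter_per_ep=len(loader), warmup_epochs=10)
#   lr = lr_schedule[epoch * len(loader) + it]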
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
def fix_random_seeds(seed=31):
"""
Fix random seeds.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.6f} ({global_avg:.6f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.6f}')
data_time = SmoothedValue(fmt='{avg:.6f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.6f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
# launched with torch.distributed.launch
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
# launched with submitit on a slurm cluster
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
# launched naively with `python main_dino.py`
# we manually add MASTER_ADDR and MASTER_PORT to env variables
elif torch.cuda.is_available():
print('Will run the code on one GPU.')
args.rank, args.gpu, args.world_size = 0, 0, 1
os.environ['MASTER_ADDR'] = '127.0.0.1'
os.environ['MASTER_PORT'] = '29500'
else:
print('Does not support training without GPU.')
sys.exit(1)
dist.init_process_group(
backend="nccl",
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
torch.cuda.set_device(args.gpu)
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
dist.barrier()
setup_for_distributed(args.rank == 0)
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
return [correct[:k].reshape(-1).float().sum(0) * 100. / batch_size for k in topk]
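# Example (illustrative, not part of the original file): for logits of shape
# (batch, num_classes) and integer targets of shape (batch,),
#   top1, top5 = accuracy(logits, target, topk=(1, 5))
# returns top-1 and top-5 accuracy as percentages.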
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
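# Usage sketch (illustrative): weight init with a truncated normal. Note that
# a and b are absolute bounds on the values, not multiples of std, e.g.
#   w = torch.empty(256, 128)
#   trunc_normal_(w, std=.02)  # ~N(0, .02^2), clamped to [-2, 2]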
class LARS(torch.optim.Optimizer):
"""
Almost copy-paste from https://github.com/facebookresearch/barlowtwins/blob/main/main.py
"""
def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, eta=0.001,
weight_decay_filter=None, lars_adaptation_filter=None):
defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum,
eta=eta, weight_decay_filter=weight_decay_filter,
lars_adaptation_filter=lars_adaptation_filter)
super().__init__(params, defaults)
@torch.no_grad()
def step(self):
for g in self.param_groups:
for p in g['params']:
dp = p.grad
if dp is None:
continue
if p.ndim != 1:
dp = dp.add(p, alpha=g['weight_decay'])
if p.ndim != 1:
param_norm = torch.norm(p)
update_norm = torch.norm(dp)
one = torch.ones_like(param_norm)
q = torch.where(param_norm > 0.,
torch.where(update_norm > 0,
(g['eta'] * param_norm / update_norm), one), one)
dp = dp.mul(q)
param_state = self.state[p]
if 'mu' not in param_state:
param_state['mu'] = torch.zeros_like(p)
mu = param_state['mu']
mu.mul_(g['momentum']).add_(dp)
p.add_(mu, alpha=-g['lr'])
class MultiCropWrapper(nn.Module):
"""
Perform forward pass separately on each resolution input.
The inputs corresponding to a single resolution are clubbed and single
forward is run on the same resolution inputs. Hence we do several
forward passes = number of different resolutions used. We then
concatenate all the output features and run the head forward on these
concatenated features.
"""
def __init__(self, backbone, head):
super(MultiCropWrapper, self).__init__()
# disable layers dedicated to ImageNet labels classification
backbone.fc, backbone.head = nn.Identity(), nn.Identity()
self.backbone = backbone
self.head = head
def forward(self, x):
# convert to list
if not isinstance(x, list):
x = [x]
idx_crops = torch.cumsum(torch.unique_consecutive(
torch.tensor([inp.shape[-1] for inp in x]),
return_counts=True,
)[1], 0)
start_idx = 0
for end_idx in idx_crops:
_out = self.backbone(torch.cat(x[start_idx: end_idx]))
if start_idx == 0:
output = _out
else:
output = torch.cat((output, _out))
start_idx = end_idx
# Run the head forward on the concatenated features.
return self.head(output)
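# Usage sketch (illustrative): wrap a backbone and projection head, then feed a
# list of multi-resolution crops; crops sharing a resolution must be adjacent
# in the list so they can be batched into one forward pass.
#   model = MultiCropWrapper(backbone, head)      # hypothetical modules
#   out = model([g1, g2, l1, l2, l3])             # two global + three local crops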
def get_params_groups(model):
regularized = []
not_regularized = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# we do not regularize biases nor Norm parameters
if name.endswith(".bias") or len(param.shape) == 1:
not_regularized.append(param)
else:
regularized.append(param)
return [{'params': regularized}, {'params': not_regularized, 'weight_decay': 0.}]
def has_batchnorms(model):
bn_types = (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d, nn.SyncBatchNorm)
for name, module in model.named_modules():
if isinstance(module, bn_types):
return True
return False
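# Usage sketch (illustrative, not part of the original file; assumes the
# MetricLogger constructor takes the delimiter used above):
#   metric_logger = MetricLogger(delimiter="  ")
#   for batch in metric_logger.log_every(loader, print_freq=10, header='Epoch: [0]'):
#       loss = train_step(batch)                  # hypothetical training step
#       metric_logger.update(loss=loss.item())
#   metric_logger.synchronize_between_processes()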
| 21,117 | 33.06129 | 115 |
py
|
3SD
|
3SD-main/model/u2net_refactor.py
|
import torch
import torch.nn as nn
import math
__all__ = ['U2NET_full', 'U2NET_lite']
def _upsample_like(x, size):
return nn.Upsample(size=size, mode='bilinear', align_corners=False)(x)
def _size_map(x, height):
# {height: size} for Upsample
size = list(x.shape[-2:])
sizes = {}
for h in range(1, height):
sizes[h] = size
size = [math.ceil(w / 2) for w in size]
return sizes
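# Example (illustrative): for a 320x320 input and height=6, _size_map returns
# {1: [320, 320], 2: [160, 160], 3: [80, 80], 4: [40, 40], 5: [20, 20]},
# i.e. the target spatial size for upsampling at each level of the U-structure.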
class REBNCONV(nn.Module):
def __init__(self, in_ch=3, out_ch=3, dilate=1):
super(REBNCONV, self).__init__()
self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dilate, dilation=1 * dilate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu_s1(self.bn_s1(self.conv_s1(x)))
class RSU(nn.Module):
def __init__(self, name, height, in_ch, mid_ch, out_ch, dilated=False):
super(RSU, self).__init__()
self.name = name
self.height = height
self.dilated = dilated
self._make_layers(height, in_ch, mid_ch, out_ch, dilated)
def forward(self, x):
sizes = _size_map(x, self.height)
x = self.rebnconvin(x)
# U-Net like symmetric encoder-decoder structure
def unet(x, height=1):
if height < self.height:
x1 = getattr(self, f'rebnconv{height}')(x)
if not self.dilated and height < self.height - 1:
x2 = unet(getattr(self, 'downsample')(x1), height + 1)
else:
x2 = unet(x1, height + 1)
x = getattr(self, f'rebnconv{height}d')(torch.cat((x2, x1), 1))
return _upsample_like(x, sizes[height - 1]) if not self.dilated and height > 1 else x
else:
return getattr(self, f'rebnconv{height}')(x)
return x + unet(x)
def _make_layers(self, height, in_ch, mid_ch, out_ch, dilated=False):
self.add_module('rebnconvin', REBNCONV(in_ch, out_ch))
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
        self.add_module('rebnconv1', REBNCONV(out_ch, mid_ch))
        self.add_module('rebnconv1d', REBNCONV(mid_ch * 2, out_ch))
for i in range(2, height):
dilate = 1 if not dilated else 2 ** (i - 1)
self.add_module(f'rebnconv{i}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
self.add_module(f'rebnconv{i}d', REBNCONV(mid_ch * 2, mid_ch, dilate=dilate))
dilate = 2 if not dilated else 2 ** (height - 1)
self.add_module(f'rebnconv{height}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
class U2NET(nn.Module):
def __init__(self, cfgs, out_ch):
super(U2NET, self).__init__()
self.out_ch = out_ch
self._make_layers(cfgs)
def forward(self, x):
sizes = _size_map(x, self.height)
maps = [] # storage for maps
# side saliency map
def unet(x, height=1):
if height < 6:
x1 = getattr(self, f'stage{height}')(x)
x2 = unet(getattr(self, 'downsample')(x1), height + 1)
x = getattr(self, f'stage{height}d')(torch.cat((x2, x1), 1))
side(x, height)
return _upsample_like(x, sizes[height - 1]) if height > 1 else x
else:
x = getattr(self, f'stage{height}')(x)
side(x, height)
return _upsample_like(x, sizes[height - 1])
def side(x, h):
# side output saliency map (before sigmoid)
x = getattr(self, f'side{h}')(x)
x = _upsample_like(x, sizes[1])
maps.append(x)
def fuse():
# fuse saliency probability maps
maps.reverse()
x = torch.cat(maps, 1)
x = getattr(self, 'outconv')(x)
maps.insert(0, x)
return [torch.sigmoid(x) for x in maps]
unet(x)
maps = fuse()
return maps
def _make_layers(self, cfgs):
self.height = int((len(cfgs) + 1) / 2)
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
for k, v in cfgs.items():
# build rsu block
self.add_module(k, RSU(v[0], *v[1]))
if v[2] > 0:
# build side layer
self.add_module(f'side{v[0][-1]}', nn.Conv2d(v[2], self.out_ch, 3, padding=1))
# build fuse layer
self.add_module('outconv', nn.Conv2d(int(self.height * self.out_ch), self.out_ch, 1))
def U2NET_full():
full = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, 3, 32, 64), -1],
'stage2': ['En_2', (6, 64, 32, 128), -1],
'stage3': ['En_3', (5, 128, 64, 256), -1],
'stage4': ['En_4', (4, 256, 128, 512), -1],
'stage5': ['En_5', (4, 512, 256, 512, True), -1],
'stage6': ['En_6', (4, 512, 256, 512, True), 512],
'stage5d': ['De_5', (4, 1024, 256, 512, True), 512],
'stage4d': ['De_4', (4, 1024, 128, 256), 256],
'stage3d': ['De_3', (5, 512, 64, 128), 128],
'stage2d': ['De_2', (6, 256, 32, 64), 64],
'stage1d': ['De_1', (7, 128, 16, 64), 64],
}
return U2NET(cfgs=full, out_ch=1)
def U2NET_lite():
lite = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, 3, 16, 64), -1],
'stage2': ['En_2', (6, 64, 16, 64), -1],
'stage3': ['En_3', (5, 64, 16, 64), -1],
'stage4': ['En_4', (4, 64, 16, 64), -1],
'stage5': ['En_5', (4, 64, 16, 64, True), -1],
'stage6': ['En_6', (4, 64, 16, 64, True), 64],
'stage5d': ['De_5', (4, 128, 16, 64, True), 64],
'stage4d': ['De_4', (4, 128, 16, 64), 64],
'stage3d': ['De_3', (5, 128, 16, 64), 64],
'stage2d': ['De_2', (6, 128, 16, 64), 64],
'stage1d': ['De_1', (7, 128, 16, 64), 64],
}
return U2NET(cfgs=lite, out_ch=1)
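# Minimal smoke test (illustrative, not part of the original file).
if __name__ == '__main__':
    net = U2NET_lite()
    x = torch.randn(1, 3, 320, 320)
    outputs = net(x)  # fused saliency map first, then the six side outputs
    print([o.shape for o in outputs])  # seven tensors of shape (1, 1, 320, 320)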
| 6,097 | 35.08284 | 101 |
py
|
3SD
|
3SD-main/model/__init__.py
|
from .u2net_transformer_pseudo_dino_final import U2NET
from .u2net import U2NETP
| 81 | 26.333333 | 54 |
py
|
3SD
|
3SD-main/model/u2net_transformer.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Mostly copy-paste from timm library.
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
"""
import math
from functools import partial
import torch
import torch.nn as nn
from model.utils import trunc_normal_
import pdb
class REBNCONV(nn.Module):
def __init__(self,in_ch=3,out_ch=3,dirate=1):
super(REBNCONV,self).__init__()
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self,x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src,tar):
    src = F.interpolate(src, size=tar.shape[2:], mode='bilinear', align_corners=False)
return src
def drop_path(x, drop_prob: float = 0., training: bool = False):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
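# Example (illustrative): with drop_prob=0.1 and training=True, each sample's
# residual branch is zeroed with probability 0.1, and surviving samples are
# scaled by 1/0.9 so the expected activation is unchanged.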
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
#pdb.set_trace()
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
def forward(self, x, return_attention=False):
y, attn = self.attn(self.norm1(x))
if return_attention:
return attn
x = x + self.drop_path(y)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=400, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
num_patches = (img_size // patch_size) * (img_size // patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
B, C, H, W = x.shape
x = self.proj(x)
#print(x.shape)
x = x.flatten(2).transpose(1, 2)
#print(self.num_patches)
return x
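# Shape walkthrough (illustrative): with img_size=400, patch_size=16 and
# embed_dim=768, an input of (B, 3, 400, 400) becomes (B, 768, 25, 25) after
# the strided conv, then (B, 625, 768) after flatten + transpose.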
### RSU-7 ###
class RSU7(nn.Module):#UNet07DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU7,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
hx6dup = _upsample_like(hx6d,hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):#UNet06DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU6,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):#UNet05DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):#UNet04DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4F,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
return hx1d + hxin
##### U^2-Net ####
class U2NET(nn.Module):
def __init__(self,in_ch=3,out_ch=1,img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=384, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super(U2NET,self).__init__()
        ### Transformer encoder ###
self.num_features = self.embed_dim = embed_dim
self.preprocess = RSU7(in_ch,16,8)
self.img_size = img_size[0]
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=8, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
### U^2Net encoder branch for local task ###
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,32,128)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(128,64,256)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(256,128,512)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(512,256,512)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(512,256,512)
# decoder
self.stage5d = RSU4F(1408,256,512)
self.stage4d = RSU4(1024,128,256)
self.stage3d = RSU5(512,64,128)
self.stage2d = RSU6(256,32,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(128,out_ch,3,padding=1)
self.side4 = nn.Conv2d(256,out_ch,3,padding=1)
self.side5 = nn.Conv2d(512,out_ch,3,padding=1)
self.side6 = nn.Conv2d(512,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
"""def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]"""
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def forward(self,x):
hx = x
#pdb.set_trace()
        tx = F.interpolate(x, size=[self.img_size, self.img_size], mode='bilinear', align_corners=False)
tx = self.prepare_tokens(self.preprocess(tx))
for blk in self.blocks:
tx = blk(tx)
tx = self.norm(tx)
tx = tx.transpose(1,2)
B, N, C = tx.shape
        # resample the token axis from C (= num_patches + [CLS]) down to C-1 values
        # so the tokens reshape cleanly into the 2D patch grid below
        tx = F.interpolate(tx.reshape(B, N, C, 1), size=[C - 1, 1], mode='bilinear', align_corners=False)
        tx = tx.reshape(B, N, self.img_size // 16, self.img_size // 16)
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
        txd = F.interpolate(tx, size=hx5.shape[2:], mode='bilinear', align_corners=False)
#-------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up,hx5,txd),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6), tx
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NETP,self).__init__()
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,64)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(64,16,64)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(64,16,64)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(64,16,64)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(64,16,64)
# decoder
self.stage5d = RSU4F(128,16,64)
self.stage4d = RSU4(128,16,64)
self.stage3d = RSU5(128,16,64)
self.stage2d = RSU6(128,16,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#decoder
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)
class VisionTransformer(nn.Module):
""" Vision Transformer """
def __init__(self, img_size=[400], patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, **kwargs):
super().__init__()
self.num_features = self.embed_dim = embed_dim
self.patch_embed = PatchEmbed(
img_size=img_size[0], patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
# Classifier head
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def interpolate_pos_encoding(self, x, w, h):
npatch = x.shape[1] - 1
N = self.pos_embed.shape[1] - 1
if npatch == N and w == h:
return self.pos_embed
class_pos_embed = self.pos_embed[:, 0]
patch_pos_embed = self.pos_embed[:, 1:]
dim = x.shape[-1]
w0 = w // self.patch_embed.patch_size
h0 = h // self.patch_embed.patch_size
# we add a small number to avoid floating point error in the interpolation
# see discussion at https://github.com/facebookresearch/dino/issues/8
w0, h0 = w0 + 0.1, h0 + 0.1
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2),
scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)),
mode='bicubic',
)
assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1]
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
def prepare_tokens(self, x):
B, nc, w, h = x.shape
x = self.patch_embed(x) # patch linear embedding
# add the [CLS] token to the embed patch tokens
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
# add positional encoding to each token
x = x + self.interpolate_pos_encoding(x, w, h)
return self.pos_drop(x)
def forward(self, x):
x = self.prepare_tokens(x)
for blk in self.blocks:
x = blk(x)
x = self.norm(x)
return x[:, 0]
def get_last_selfattention(self, x):
x = self.prepare_tokens(x)
for i, blk in enumerate(self.blocks):
if i < len(self.blocks) - 1:
x = blk(x)
else:
# return attention of the last block
return blk(x, return_attention=True)
def get_intermediate_layers(self, x, n=1):
x = self.prepare_tokens(x)
# we return the output tokens from the `n` last blocks
output = []
for i, blk in enumerate(self.blocks):
x = blk(x)
if len(self.blocks) - i <= n:
output.append(self.norm(x))
return output
def vit_tiny(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_small(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
def vit_base(patch_size=16, **kwargs):
model = VisionTransformer(
patch_size=patch_size, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
class DINOHead(nn.Module):
def __init__(self, in_dim, out_dim, use_bn=False, norm_last_layer=True, nlayers=3, hidden_dim=2048, bottleneck_dim=256):
super().__init__()
nlayers = max(nlayers, 1)
if nlayers == 1:
self.mlp = nn.Linear(in_dim, bottleneck_dim)
else:
layers = [nn.Linear(in_dim, hidden_dim)]
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
for _ in range(nlayers - 2):
layers.append(nn.Linear(hidden_dim, hidden_dim))
if use_bn:
layers.append(nn.BatchNorm1d(hidden_dim))
layers.append(nn.GELU())
layers.append(nn.Linear(hidden_dim, bottleneck_dim))
self.mlp = nn.Sequential(*layers)
self.apply(self._init_weights)
self.last_layer = nn.utils.weight_norm(nn.Linear(bottleneck_dim, out_dim, bias=False))
self.last_layer.weight_g.data.fill_(1)
if norm_last_layer:
self.last_layer.weight_g.requires_grad = False
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.mlp(x)
x = nn.functional.normalize(x, dim=-1, p=2)
x = self.last_layer(x)
return x
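# Minimal usage sketch (illustrative, not part of the original file): a
# DINO-style pairing of a ViT-S/16 backbone with a projection head; out_dim
# here is an arbitrary choice.
if __name__ == '__main__':
    backbone = vit_small(patch_size=16)        # embed_dim = 384
    head = DINOHead(in_dim=384, out_dim=1024)
    x = torch.randn(2, 3, 400, 400)
    feats = backbone(x)                        # (2, 384) [CLS] features
    print(head(feats).shape)                   # torch.Size([2, 1024])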
| 30,678 | 31.917382 | 124 |
py
|
STFTgrad
|
STFTgrad-main/classifier/classifier_adaptive.py
|
"""
Code for the adaptive classifier with the differentiable STFT front-end
This will be trained on our test input signal, alternating sinusoids of 2 frequencies
"""
# Dependencies
import numpy as np
from tqdm import tqdm
import haiku as hk
import jax.numpy as jnp
import jax
import optax
from dstft import diff_stft
import sys
# Order of input arguments:
"""
1 : list of N to initialize classifier with
2 : learning rate
3 : number of epochs
"""
n = len(sys.argv[1])
a = sys.argv[1][1:n-1]
a = a.split(',')
list_N = [int(i) for i in a]
lr = float(sys.argv[2])
nepochs = int(sys.argv[3])
# Construct the test signal to classify:
# Sampling rate
fs = 200
# Durations and frequencies of the 2 sines
dur_sins = [0.2,0.2]
freqs = [20,80]
Ns = [int(fs*i) for i in dur_sins]
# adding some noise in the sine to prevent the classifier from overfitting
list_sin = [(np.sin(2*np.pi*(freqs[i]/fs)*np.arange(Ns[i])) + 0.2*np.random.randn(Ns[i])) for i in range(len(dur_sins))]
one_period = np.concatenate(list_sin)
# Repeat this Nr times
Nr = 20
signal = np.tile(one_period,Nr)
P = sum(dur_sins)
I1 = np.arange(0,Nr*P,P)
I2 = np.arange(0.2,Nr*P,P)
# Input dimension to the classifier after the differentiable STFT (it is zero-padded to ensure this dimension)
Nzp = 50
# Classifier (a linear layer + softmax); the differentiable STFT acts as its pre-processor
def forward(x):
mlp = hk.Sequential([
hk.Linear(2), jax.nn.softmax
])
return mlp(x)
net = hk.without_apply_rng(hk.transform(forward))
def loss_fn(param_dict, signal):
params = param_dict["nn"]
sigma = param_dict["s"]
hf = 1
N = int(jnp.round(6*sigma))
# Adding some more noise during training to prevent classifier from overfitting on irrelevant aspects of the spectra
signal = signal + 0.2*np.random.randn(signal.shape[0])
x = diff_stft(signal, s = sigma,hf = hf)
li = []
l1 = jnp.array([[1,0]])
l2 = jnp.array([[0,1]])
l_c = []
for i in range(x.shape[1]):
timi = i*int(hf*N)/fs
d1 = np.min(np.abs(I1 - timi))
d2 = np.min(np.abs(I2 - timi))
if(d1 < d2):
li.append(1)
l_c.append(l1)
else:
li.append(2)
l_c.append(l2)
li = np.array(li)
l_c = np.concatenate(l_c,axis = 0).T
xzp = jnp.concatenate([x,jnp.zeros((Nzp - (N//2 + 1),x.shape[1]))],axis = 0)
logits = net.apply(params,xzp.T)
# Regularized loss (Cross entropy + regularizer to avoid small windows)
cel = -jnp.mean(logits*l_c.T) + (0.1/sigma)
return cel
def update(
param_dict,
opt_state,
signal
):
grads = jax.grad(loss_fn)(param_dict, signal)
updates, opt_state = opt.update(grads, opt_state)
new_params = optax.apply_updates(param_dict, updates)
return new_params, opt_state
# Training the Classifier
nH_evol_fin = []
# list_N = [10,15,20,25,30,45,55,65,70]
opt = optax.adam(lr)
rng = jax.random.PRNGKey(42)
for Ni in list_N:
params = net.init(rng,np.random.randn(1,Nzp))
sinit = (Ni/6)
param_dict = {"nn":params,"s":sinit}
opt_state = opt.init(param_dict)
pfdict = 0
nH = []
for t in tqdm(range(nepochs)):
param_dict, opt_state = update(param_dict, opt_state, signal)
pfdict = param_dict
nH.append(6*param_dict["s"])
nH_evol_fin.append(nH)
# Plotting the evolution of the window length across epochs for different initializations
import matplotlib.pyplot as pyp
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
pyp.figure()
pyp.title('Convergence from varying initial values')
pyp.xlabel('Epoch')
pyp.ylabel('Window Length (N)')
for curve in nH_evol_fin:
    pyp.plot(curve, 'b')
pyp.show()
| 3,684 | 25.510791 | 120 |
py
|
STFTgrad
|
STFTgrad-main/classifier/classifier_ordinary.py
|
"""
Code for a normal classifier (to obtain the loss function as a function of the window length)
This will be trained on our test input signal, alternating sinusoids of 2 frequencies
"""
# Dependencies
import numpy as np
from tqdm import tqdm
import haiku as hk
import jax.numpy as jnp
import jax
import optax
from dstft import diff_stft
import sys
# Order of input arguments:
"""
1 : list of N to initialize classifier with
2 : learning rate
3 : number of epochs
"""
n = len(sys.argv[1])
a = sys.argv[1][1:n-1]
a = a.split(',')
list_N = [int(i) for i in a]
lr = float(sys.argv[2])
nepochs = int(sys.argv[3])
# Construct the test signal to classify:
# Sampling rate
fs = 200
# Durations and frequencies of the 2 sines
dur_sins = [0.2,0.2]
freqs = [20,80]
Ns = [int(fs*i) for i in dur_sins]
# adding some noise in the sine to prevent the classifier from overfitting
list_sin = [(np.sin(2*np.pi*(freqs[i]/fs)*np.arange(Ns[i])) + 0.2*np.random.randn(Ns[i])) for i in range(len(dur_sins))]
one_period = np.concatenate(list_sin)
# Repeat this Nr times
Nr = 20
signal = np.tile(one_period,Nr)
P = sum(dur_sins)
I1 = np.arange(0,Nr*P,P)
I2 = np.arange(0.2,Nr*P,P)
# Constructing the classifier
def forward(x):
mlp = hk.Sequential([
hk.Linear(2), jax.nn.softmax
])
return mlp(x)
net = hk.without_apply_rng(hk.transform(forward))
def loss_fn(params, inp, labels):
logits = net.apply(params,inp)
cel = -jnp.mean(logits * labels)
return cel
def update(
params: hk.Params,
opt_state,
x,l
):
grads = jax.grad(loss_fn)(params, x,l)
updates, opt_state = opt.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
# Training the classifier
loss_arrs_N = []
loss_fin = []
N_sweep = list_N
opt = optax.adam(lr)
rng = jax.random.PRNGKey(42)
for N in N_sweep:
Nc = N
Nzp = 50
hf = 1
    # use a fresh noisy copy so noise does not accumulate on `signal` across the N sweep
    noisy_signal = signal + 0.1*np.random.randn(signal.shape[0])
    x = diff_stft(noisy_signal, s = Nc/6,hf = hf)
li = []
l1 = jnp.array([[1,0]])
l2 = jnp.array([[0,1]])
l_c = []
for i in range(x.shape[1]):
timi = i*int(hf*Nc)/fs
d1 = np.min(np.abs(I1 - timi))
d2 = np.min(np.abs(I2 - timi))
if(d1 < d2):
li.append(1)
l_c.append(l1)
else:
li.append(2)
l_c.append(l2)
li = np.array(li)
l_c = np.concatenate(l_c,axis = 0)
xzp = jnp.concatenate([x,jnp.zeros((Nzp - (Nc//2 + 1),x.shape[1]))],axis = 0)
params = net.init(rng,np.random.randn(1,Nzp))
opt_state = opt.init(params)
paramsf = 0
liter = []
for t in tqdm(range(nepochs)):
params, opt_state = update(params, opt_state, xzp.T, l_c)
paramsf = params
liter.append(loss_fn(paramsf,xzp.T, l_c) + (0.6/N))
loss_arrs_N.append(liter)
loss_fin.append(loss_fn(paramsf,xzp.T, l_c))
# Plotting the spectrograms and final loss for the different N's
costs_fin = loss_fin
import matplotlib.pyplot as pyp
import matplotlib
from matplotlib.pylab import register_cmap
cdict = {
'red': ((0.0, 1.0, 1.0), (1.0, 0.0, 0.0)),
'green': ((0.0, 1.0, 1.0), (1.0, .15, .15)),
'blue': ((0.0, 1.0, 1.0), (1.0, 0.4, 0.4)),
'alpha': ((0.0, 0.0, 0.0), (1.0, 1.0, 1.0))}
register_cmap(name='InvBlueA', data=cdict)
matplotlib.rcParams.update({'font.size': 16})
def plot_various_window_size(sigi):
pyp.figure(figsize=(22, 4))
szs = N_sweep
for i in range(len(szs)):
        sz = szs[i]
        a = diff_stft(sigi, s=szs[i] * 1.0 / 6, hf=1)
        pyp.gcf().add_subplot(1, len(szs), i + 1)
        pyp.gca().pcolorfast(a, cmap="InvBlueA")
pyp.gca().set_title(f'FFT size: {sz}, \n Loss: {costs_fin[i]:.5f}')
pyp.xlabel('Time Frame')
pyp.ylabel('Frequency Bin')
pyp.gcf().tight_layout()
plot_various_window_size(signal[:5*one_period.shape[0]])
| 3,916 | 25.828767 | 120 |
py
|
STFTgrad
|
STFTgrad-main/classifier/dstft.py
|
"""
Code for the differentiable STFT front-end
As explained in our paper, we use a Gaussian Window STFT, with N = floor(6\sigma)
"""
# Dependencies
import jax.numpy as jnp
import jax
def diff_stft(xinp,s,hf = 0.5):
"""
Inputs
------
xinp: jnp.array
Input audio signal in time domain
s: jnp.float
The standard deviation of the Gaussian window to be used
hf: jnp.float
The fraction of window size that will be overlapped within consecutive frames
Outputs
-------
a: jnp.array
The computed magnitude spectrogram
"""
# Effective window length of Gaussian is 6\sigma
sz = s * 6
hp = hf*sz
# Truncating to integers for use in jnp functions
intsz = int(jnp.round(sz))
inthp = int(jnp.round(hp))
m = jnp.arange(0, intsz, dtype=jnp.float32)
# Obtaining the "differentiable" window function by using the real valued \sigma
window = jnp.exp(-0.5 * jnp.power((m - sz / 2) / (s + 1e-5), 2))
window_norm = window/jnp.sum(window)
# Computing the STFT, and taking its magnitude
stft = jnp.sqrt(1/(2*window_norm.shape[0] + 1))*jnp.stack([jnp.fft.rfft(window_norm * xinp[i:i+intsz]) for i in range(0, len(xinp) - intsz, inthp)],1)
a = jnp.abs(stft)
return a
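# Usage sketch (illustrative, not part of the original file): spectrogram of a
# 1 s, 20 Hz sine sampled at 200 Hz, with sigma = 5 (window length ~30, 50% hop).
if __name__ == '__main__':
    import numpy as np
    fs = 200
    sig = jnp.array(np.sin(2 * np.pi * 20 * np.arange(fs) / fs))
    spec = diff_stft(sig, s=5.0)   # hf defaults to 0.5
    print(spec.shape)              # (16, 12): rfft bins x time frames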
| 1,290 | 26.468085 | 154 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/adaptive_stft.py
|
import math
from tqdm import trange
import sys
import pathlib
import torch.autograd
import torch
import numpy as np
import torch.optim
import torch.nn as nn
from celluloid import Camera
import matplotlib.colors
import matplotlib.pyplot as plt
from matplotlib.figure import Figure
import torch.nn.functional as F
from adaptive_stft_utils.operators import dithering_int, Sign, InvSign
from adaptive_stft_utils.mappings import IdxToWindow, make_find_window_configs
from adaptive_stft_utils.losses import kurtosis
class NumericalError(Exception):
def __init__(self, message, grad_hist=None, window_times_signal_grads=None, f_grad=None):
self.message = message
self.grad_hist = grad_hist
self.window_times_signal_grads = window_times_signal_grads
self.f_grad = f_grad
def __str__(self):
if self.message:
return 'NumericalError, {0} '.format(self.message)
else:
return 'NumericalError'
#COLOR_MAP = 'GnBu'
COLOR_MAP = None
def optimize_stft(
s,
lr=1e-4,
num_windows=None,
sgd=False,
num_epochs=9000,
score_fn=kurtosis,
window_shape='trapezoid',
make_animation=True,
name_for_saving='',
):
if window_shape not in ['trapezoid', 'triangle']:
raise RuntimeError(f'Unknown window shape {window_shape}')
kur_hist = []
fig_width = int(s.size(0) / 27000 * 14 * 10) / 10
anim_fig = Figure(figsize=(fig_width, 7))
preview_fig = Figure(figsize=(fig_width, 7))
from IPython.core.display import display
preview_handle = display(preview_fig, display_id=True)
camera = Camera(anim_fig)
matrix_fig = plt.figure(figsize=(22, 15))
s = s.cuda()
# make (num_windows - 1) points, in addition to start and end (0, signal_len)
assert len(s.shape) == 1
last_sample = s.size(0)
if num_windows is None:
assert s.size(0) > 512
num_windows = s.size(0) // 512
idx_to_window = IdxToWindow(
signal_len=last_sample, num_windows=num_windows)
idx_to_window = idx_to_window.cuda()
if sgd:
optimizer = torch.optim.SGD(idx_to_window.parameters(), lr=lr)
else:
optimizer = torch.optim.AdamW(
idx_to_window.parameters(), lr=lr, amsgrad=True, weight_decay=1e-6)
find_window_configs = make_find_window_configs(idx_to_window, last_sample=last_sample)
xs = None
ys = None
with trange(num_epochs + 1) as t:
for ep in t:
optimizer.zero_grad()
# window_configs excludes 0 and sample_length
configs = find_window_configs()
xs, ys, s_ext, extend_left = make_window_extend_signal(configs, s, window_shape=window_shape)
rffts_not_detached = []
rffts = []
len_xs = xs.size(0)
assert len_xs > 1, f"xs: {xs}, ys: {ys}, slope: {idx_to_window.slope.item()}"
for i in range(len_xs - 1):
rfft = apply_adaptive_window(s_ext, xs[i], ys[i], xs[i + 1], ys[i + 1], window_shape=window_shape)
                rfft_sq = rfft.real ** 2 + rfft.imag ** 2  # power spectrum from the complex rfft
rffts_not_detached.append(rfft_sq)
rffts.append(rfft_sq.detach().cpu().numpy())
n_wnd = len(rffts_not_detached)
score = score_fn(rffts_not_detached)
t.set_postfix(score=score.item(),
slope=idx_to_window.slope.item(), n_wnd=n_wnd)
if (torch.isnan(score).any() or torch.isinf(score).any()):
raise NumericalError(
f'score become NaN at iteration {ep}')
(-score).backward()
torch.nn.utils.clip_grad_norm_(
idx_to_window.parameters(), max_norm=1)
optimizer.step()
kur_hist.append(score.item())
plot_width = int(768 * fig_width / 7)
def get_scaled_fft_plots():
# since each window has different size, stretch the FFT frequency to fit the largest
max_size = np.max([x.shape[0] for x in rffts])
scaled_fft_plots = np.zeros(
(max_size, last_sample), dtype=np.float32)
i = 0
from scipy.interpolate import interp1d
for i, fft in enumerate(rffts):
bins = np.linspace(0, max_size, fft.shape[0])
f_out = interp1d(bins, fft, axis=0, kind='nearest')
new_bins = np.linspace(0, max_size, max_size)
fft_out = f_out(new_bins)
fft_out /= np.max(fft_out)
if i == 0:
start_point = int(max(ys[i] - extend_left, 0))
else:
start_point = int(max((xs[i] + ys[i]) / 2 - extend_left, 0))
if i < len(rffts) - 1:
end_point = int((xs[i + 1] + ys[i + 1]) / 2) - extend_left
else:
end_point = last_sample
scaled_fft_plots[:, start_point:end_point] = np.expand_dims(fft_out, -1)
import cv2
scaled_fft_plots = np.power(scaled_fft_plots, 0.5)
return cv2.resize(scaled_fft_plots, dsize=(plot_width, 768))
if ep % (num_epochs // 8) == 0:
outfile = f'{name_for_saving}_plot_data_{ep}.npz'
model_path = f'{name_for_saving}_mapping_model_{ep}.pth'
scaled_fft_plots = get_scaled_fft_plots()
np.savez(outfile, spectro=scaled_fft_plots,
x=xs.cpu().detach().numpy(),
y=ys.cpu().detach().numpy(),
extend_left=extend_left,
sample_length=last_sample,
sample=s.cpu().detach().numpy(),
sample_extended=s_ext.cpu().detach().numpy())
torch.save(idx_to_window.state_dict(), model_path)
plt.gcf().add_subplot(3, 3, ep // (num_epochs // 8) + 1)
                plt.gca().pcolormesh(scaled_fft_plots, norm=matplotlib.colors.Normalize(), linewidth=0, cmap=COLOR_MAP)
plt.gca().set_title(f'ep: {ep}, score: {score.item():.5f}')
for i in range(xs.size(0)):
inter_window_line = (xs[i] + ys[i]).item() / 2 - extend_left
if inter_window_line <= 0 or inter_window_line >= last_sample:
continue
plt.gca().axvline(inter_window_line / last_sample * plot_width,
linewidth=0.5, antialiased=True)
if ep % 15 == 0:
scaled_fft_plots = get_scaled_fft_plots()
def draw(fig):
fig.gca().pcolormesh(scaled_fft_plots, norm=matplotlib.colors.Normalize(), linewidth=0, cmap=COLOR_MAP)
fig.gca().text(0.3, 1.01, f'ep: {ep}, score: {score.item():.5f}', transform=fig.gca().transAxes)
for i in range(xs.size(0)):
inter_window_line = (xs[i] + ys[i]).item() / 2 - extend_left
if inter_window_line <= 0 or inter_window_line >= last_sample:
continue
fig.gca().axvline(inter_window_line / last_sample * plot_width,
linewidth=0.5, antialiased=True)
if make_animation:
draw(anim_fig)
camera.snap()
preview_fig.gca().clear()
draw(preview_fig)
# Show image on notebook
preview_fig.canvas.draw()
preview_handle.update(preview_fig)
if ep % 30 == 0:
import gc
gc.collect()
if make_animation:
ani = camera.animate(interval=33.3, blit=True)
else:
ani = None
preview_handle.update(plt.figure())
matrix_fig.tight_layout()
return idx_to_window, kur_hist, ani
def make_window_extend_signal(configs, s: torch.Tensor, window_shape: str):
last_sample: int = s.size(0)
xs = [x for (x, _) in configs]
xs[0] = torch.clamp(xs[0], -last_sample + 1, 0)
xs[-1] = torch.clamp(xs[-1], 0, last_sample * 2 - 1)
if window_shape == 'trapezoid':
ys = [xs[i + 1] - (xs[i + 1] - xs[i]) * configs[i][1]
for i in range(len(xs) - 1)]
ys.insert(0, xs[0] - (xs[1] - xs[0]) * configs[0][1])
elif window_shape == 'triangle':
ys = [xs[i] for i in range(len(xs))]
# pick x values that are in sample range
xs.pop(0)
else:
raise RuntimeError(f'Unknown window shape {window_shape}')
ys[0] = torch.clamp(ys[0], -last_sample + 1, 0)
xs = torch.cat([x.view(1) for x in xs])
# extend the signal both ways via zero padding
offset = -int(torch.floor(ys[0]))
assert offset >= 0
extend_left = offset
assert extend_left <= last_sample
extend_right = int(torch.ceil(xs[-1])) - last_sample + 1
assert extend_right >= 0
assert extend_right <= last_sample
s_left_pad = torch.zeros_like(s[:extend_left])
s_right_pad = torch.zeros_like(s[last_sample - extend_right:])
s_ext = torch.cat((s_left_pad, s, s_right_pad))
xs = xs + extend_left
ys = torch.cat([y.view(1) for y in ys])
ys = ys + extend_left
return xs, ys, s_ext, extend_left
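# Geometry note (illustrative reading of the parametrization): for the
# 'trapezoid' shape, window i spans [y_i, x_{i+1}]: it ramps up on [y_i, x_i],
# stays flat on [x_i, y_{i+1}], and ramps down on [y_{i+1}, x_{i+1}], so
# neighbouring windows cross-fade over the shared ramp region.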
def apply_adaptive_window(
s_ext: torch.Tensor,
x_i: torch.Tensor,
y_i: torch.Tensor,
x_next: torch.Tensor,
y_next: torch.Tensor,
window_shape: str
) -> torch.Tensor:
if window_shape == 'trapezoid':
# three parts
left_trig_start = dithering_int(y_i)
left_trig_end = dithering_int(x_i)
right_trig_start = dithering_int(y_next)
right_trig_end = dithering_int(x_next)
rect_start = left_trig_end
rect_end = right_trig_start
m = torch.arange(0, left_trig_end - left_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = m / (x_i - y_i)
left_ramp_times_signal = ramp * \
s_ext[left_trig_start:left_trig_end]
m = torch.arange(0, right_trig_end - right_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = 1 - (m / (x_next - y_next))
right_ramp_times_signal = ramp * s_ext[right_trig_start:right_trig_end]
rect_signal = s_ext[rect_start:rect_end]
# rect_signal = rect_signal * Sign.apply(y_next) * InvSign.apply(x_i)
window_times_signal = torch.cat(
(left_ramp_times_signal, rect_signal, right_ramp_times_signal), dim=-1)
elif window_shape == 'triangle':
left_trig_start = dithering_int(y_i)
left_trig_end = dithering_int(x_i)
right_trig_start = dithering_int(x_i)
right_trig_end = dithering_int(x_next)
m = torch.arange(0, left_trig_end - left_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = m / (x_i - y_i)
left_ramp_times_signal = ramp * \
s_ext[left_trig_start:left_trig_end]
m = torch.arange(0, right_trig_end - right_trig_start,
dtype=torch.float32, device=s_ext.device)
ramp = 1 - (m / (x_next - y_next))
right_ramp_times_signal = ramp * s_ext[right_trig_start:right_trig_end]
window_times_signal = torch.cat(
(left_ramp_times_signal, right_ramp_times_signal), dim=-1)
else:
raise RuntimeError(f'unknown window shape {window_shape}')
assert window_times_signal.size(0) > 1, f"x: {x_i}, y: {y_i}"
    # torch.rfft was removed in PyTorch 1.8+; torch.fft.rfft returns a complex
    # tensor, and norm='ortho' matches the old normalized=True behaviour.
    rfft = torch.fft.rfft(window_times_signal, norm='ortho')
    return rfft
| 11,853 | 39.875862 | 123 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/MNISTExperiment.py
|
from models import UMNNMAFFlow
import torch
from lib import dataloader as dl
import lib as transform
import lib.utils as utils
import numpy as np
import os
import pickle
from timeit import default_timer as timer
import torchvision
from tensorboardX import SummaryWriter
writer = SummaryWriter()
def train_mnist(dataset, load=None, gen_image=False, save=None, temperature=.5, real_images=False, nb_iter=5,
nb_steps=50, solver="CC", hidden_embeding=[1024, 1024, 1024], hidden_derivative=[100, 50, 50, 50, 50],
embeding_size=30, nb_images=5, conditionnal=False, nb_flow=5, lr=1e-3, weight_decay=1e-2,
nb_epoch=500, L=1., batch_size=100):
cuda = 0 if torch.cuda.is_available() else -1
device = "cuda:0" if torch.cuda.is_available() else "cpu"
save_name = dataset + "/" + str(nb_steps) if save is None else save
if save is not None or gen_image:
if not (os.path.isdir(save_name)):
os.makedirs(save_name)
logger = utils.get_logger(logpath=os.path.join(save_name, 'logs'), filepath=os.path.abspath(__file__),
saving=save is not None)
cond_in = 10 if conditionnal else 0
nb_in = 28**2
model = UMNNMAFFlow(nb_flow=nb_flow, nb_in=nb_in, hidden_derivative=hidden_derivative,
hidden_embedding=hidden_embeding, embedding_s=embeding_size, nb_steps=nb_steps, device=device,
solver=solver, cond_in=cond_in).to(device)
if save is not None:
with open(save + "/model.txt", "w") as f:
f.write(str(model))
opt = torch.optim.Adam(model.parameters(), lr, weight_decay=weight_decay)
if nb_steps > 0:
max_forward = min(int(3000/(nb_steps/nb_steps * nb_flow * hidden_derivative[0]/100)*784/nb_in), batch_size)
logger.info("Max forward: %d" % max_forward)
random_steps = nb_steps <= 0
if conditionnal:
train_loader, valid_loader, test_loader = dl.dataloader(dataset, batch_size, cuda=cuda, conditionnal=True)
else:
train_loader, valid_loader, test_loader = dl.dataloader(dataset, batch_size, cuda=cuda)
if load is not None:
logger.info("Loading model")
model.load_state_dict(torch.load(load + '/model.pt'))
model.eval()
with torch.no_grad():
# Compute Test loss
i = 0
ll_test = 0.
bpp_avg = 0.
start = end = timer()
for batch_idx, (cur_x, target) in enumerate(test_loader):
if conditionnal:
bpp, ll_tmp, z_est = 0, 0, 0
for j in range(10):
y = target.view(-1, 1)*0 + j
y_one_hot = torch.zeros(y.shape[0], 10).scatter(1, y, 1)
context = y_one_hot.to(device)
bpp_i, ll_tmp_i, z_est_i = model.compute_bpp(cur_x.view(-1, nb_in).to(device), context=context)
bpp += bpp_i/10
ll_tmp += ll_tmp_i/10
else:
context = None
bpp, ll_tmp, z_est = model.compute_bpp(cur_x.view(-1, nb_in).to(device))
i += 1
ll_test -= ll_tmp.mean()
bpp_avg += bpp.mean()
if i == 5 and nb_epoch > 0:
break
end = timer()
logger.info("{:d} :Test loss: {:4f} - BPP: {:4f} - Elapsed time per epoch {:4f}".format(
i, -ll_test.detach().cpu().item()/i, -bpp_avg.detach().cpu().item() / i, end - start))
logger.info("{:d} :Test loss: {:4f} - BPP: {:4f} - Elapsed time per epoch {:4f}".format(
i, -ll_test.detach().cpu().item() / i, -bpp_avg.detach().cpu().item() / i, end - start))
nb_sample = nb_images
# Generate and save images
if gen_image:
if real_images:
logger.info("Regenerate real images")
x, y = next(iter(test_loader))
y = y.view(-1, 1)
context = torch.zeros(y.shape[0], 10).scatter(1, y, 1).to(device) if conditionnal else None
nb_sample = 100
z = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_sample, nb_in])).to(
device) * torch.arange(0.1, 1.1, .1).unsqueeze(0).expand(int(nb_sample / 10), -1).transpose(0,
1) \
.contiguous().view(-1).unsqueeze(1).expand(-1, 784).to(device)
else:
logger.info("Generate random images")
z = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_sample, nb_in])).to(
device) * temperature
z_true = z[:nb_sample, :]
if conditionnal:
if real_images:
nb_sample = 100
z_true = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_sample, nb_in])).to(
device) * torch.arange(0.1, 1.1, .1).unsqueeze(0).expand(int(nb_sample/10), -1).transpose(0, 1)\
.contiguous().view(-1).unsqueeze(1).expand(-1, 784).to(device)
digit = (torch.arange(nb_sample) % 10).float().view(-1, 1)
else:
digit = (torch.arange(nb_sample) % 10).float().view(-1, 1)
logger.info("Creation of: " + str(digit))
context = torch.zeros(digit.shape[0], 10).scatter(1, digit.long(), 1).to(device)
x_est = model.invert(z_true, nb_iter, context=context)
bpp, ll, _ = model.compute_bpp(x_est, context=context)
logger.info("Bpp of generated data is: {:4f}".format(bpp.mean().item()))
logger.info("ll of generated data is: {:4f}".format(ll.mean().item()))
x = transform.logit_back(x_est.detach().cpu(), 1e-6).view(x_est.shape[0], 1, 28, 28)
torchvision.utils.save_image(x, save_name + '/' + str(temperature) + 'images.png', nrow=10,
padding=1)
exit()
with open(load + '/losses.pkl', 'rb') as f:
losses_train, losses_test = pickle.load(f)
cur_epoch = len(losses_test)
else:
losses_train = []
losses_test = []
cur_epoch = 0
for epoch in range(cur_epoch, cur_epoch + nb_epoch):
ll_tot = 0
i = 0
start = timer()
for batch_idx, (cur_x, target) in enumerate(train_loader):
if conditionnal:
y = target.view(-1, 1)
y_one_hot = torch.zeros(y.shape[0], 10).scatter(1, y, 1)
context = y_one_hot.to(device)
else:
cur_x = cur_x.view(-1, nb_in).to(device)
context = None
if random_steps:
nb_steps = np.random.randint(5, 50)*2
max_forward = min(int(1500 / nb_steps), batch_size)
model.set_steps_nb(nb_steps)
cur_x = cur_x.to(device)
ll = 0.
opt.zero_grad()
for cur_su_batch in range(0, batch_size, max_forward):
ll, z = model.compute_ll(cur_x.view(-1, nb_in)[cur_su_batch:cur_su_batch+max_forward], context=context)
ll = -ll.mean()/(batch_size/z.shape[0])
ll.backward()
ll_tot += ll.detach()
opt.step()
if L > 0:
model.forceLipshitz(L)
i += 1
if i % 10 == 0:
time_tot = timer()
logger.info("{:d} cur_loss - {:4f} - Average time elapsed per batch {:4f}".format(
i, ll_tot.item() / i, (time_tot - start) / i))
if save:
torch.save(model.state_dict(), save_name + '/model.pt')
ll_tot /= i
losses_train.append(ll_tot.detach().cpu())
with torch.no_grad():
# Generate and save images
if gen_image and epoch % 10 == 0:
z = torch.distributions.Normal(0., 1.).sample(torch.Size([nb_images, nb_in])).to(device)*temperature
if conditionnal:
digit = (torch.arange(nb_sample) % 10).float().view(-1, 1)
logger.info("Creation of: " + str(digit))
context = torch.zeros(digit.shape[0], 10).scatter(1, digit.long(), 1).to(device)
x = model.invert(z, nb_iter, context=context)
logger.info("Inversion error: {:4f}".format(torch.abs(z - model.forward(x, context=context)).mean().item()))
x = x.detach().cpu()
x = transform.logit_back(x, 1e-6).view(x.shape[0], 1, 28, 28)
writer.add_image('data/images', torchvision.utils.make_grid(x, nrow=4), epoch)
torchvision.utils.save_image(x, save_name + '/epoch_{:04d}.png'.format(epoch), nrow=4, padding=1)
model.set_steps_nb(nb_steps)
# Compute Test loss
i = 0
ll_test = 0.
for batch_idx, (cur_x, target) in enumerate(valid_loader):
if conditionnal:
y = target.view(-1, 1)
y_one_hot = torch.zeros(y.shape[0], 10).scatter(1, y, 1)
context = y_one_hot.to(device)
else:
context = None
ll_tmp, _ = model.compute_ll(cur_x.view(-1, nb_in).to(device), context=context)
i += 1
ll_test -= ll_tmp.mean()
ll_test /= i
losses_test.append(ll_test.detach().cpu())
writer.add_scalars('data/' + save_name + "/losses", {"Valid": ll_test.detach().cpu().item(),
"Train": ll_tot.detach().cpu().item()}, epoch)
# Save losses
if save:
if epoch % 5 == 0:
if not (os.path.isdir(save_name + '/models')):
os.makedirs(save_name + '/models')
torch.save(model.state_dict(), save_name + '/models/model_{:04d}.pt'.format(epoch))
with open(save_name + '/losses.pkl', 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump([losses_train, losses_test], f)
logger.info("epoch: {:d} - Train loss: {:4f} - Test loss: {:4f} - L: {:4f}".format(
epoch, ll_tot.detach().cpu().item(), ll_test.detach().cpu().item(), model.computeLipshitz(10).detach()))
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument("-load", default=None, help="where to load")
parser.add_argument("-gen", default=False, action="store_true", help="where to store results")
parser.add_argument("-save", default=None, help="where to store results")
parser.add_argument("-steps", default=50, type=int, help="number of integration steps")
parser.add_argument("-temperature", default=.5, type=float, help="Temperature for sample")
parser.add_argument("-solver", default="CC", help="Temperature for sample")
parser.add_argument("-hidden_embedding", nargs='+', type=int, default=[1024, 1024, 1024], help="Nb neurons for emebding")
parser.add_argument("-hidden_derivative", nargs='+', type=int, default=[100, 50, 50, 50, 50], help="Nb neurons for derivative")
parser.add_argument("-embedding_size", type=int, default=30, help="Size of embedding part")
parser.add_argument("-real_images", type=bool, default=False, help="Generate real images")
parser.add_argument("-dataset", type=str, default="MNIST", help="Dataset")
parser.add_argument("-nb_images", type=int, default=5, help="Number of images to be generated")
parser.add_argument("-conditionnal", type=bool, default=False, help="Conditionning on class or not")
parser.add_argument("-nb_flow", type=int, default=5, help="Number of nets in the flow")
parser.add_argument("-weight_decay", type=float, default=1e-2, help="Weight Decay")
parser.add_argument("-lr", type=float, default=1e-3, help="Learning rate")
parser.add_argument("-nb_epoch", type=int, default=500, help="Number of epoch")
parser.add_argument("-nb_iter", type=int, default=500, help="Number of iter for inversion")
parser.add_argument("-Lipshitz", type=float, default=0, help="Lipshitz constant max of linear layer in derivative net")
parser.add_argument("-b_size", type=int, default=100, help="Number of samples per batch")
args = parser.parse_args()
dataset = args.dataset
dir_save = None if args.save is None else dataset + "/" + args.save
dir_load = None if args.load is None else dataset + "/" + args.load
train_mnist(dataset=dataset, load=dir_load, gen_image=args.gen, save=dir_save, nb_steps=args.steps,
temperature=args.temperature, solver=args.solver, hidden_embeding=args.hidden_embedding,
hidden_derivative=args.hidden_derivative, real_images=args.real_images, nb_images=args.nb_images,
conditionnal=args.conditionnal, nb_flow=args.nb_flow, weight_decay=args.weight_decay, lr=args.lr,
nb_epoch=args.nb_epoch, L=args.Lipshitz, nb_iter=args.nb_iter,
batch_size=args.b_size)
| 13,329 | 49.492424 | 127 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/ToyExperiments.py
|
from models import UMNNMAFFlow
import torch
import lib.toy_data as toy_data
import numpy as np
import matplotlib.pyplot as plt
from timeit import default_timer as timer
import os
import lib.utils as utils
import lib.visualize_flow as vf
green = '#e15647'  # note: despite the name, this is the reddish accent color used below
black = '#2d5468'
white_bg = '#ececec'
def summary_plots(x, x_test, folder, epoch, model, ll_tot, ll_test):
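    # NOTE: builds its output paths from the module-level ``toy`` variable set in the
    # dataset loop at the bottom of this file.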
fig = plt.figure(figsize=(7, 7))
ax = plt.subplot(1, 1, 1, aspect="equal")
vf.plt_flow(model.compute_ll, ax)
#ax = plt.subplot(1, 3, 2, aspect="equal")
#vf.plt_samples(toy_data.inf_train_gen(toy, batch_size=50000), ax, npts=500)
#ax = plt.subplot(1, 3, 3, aspect="equal")
#samples = model.invert(torch.distributions.Normal(0., 1.).sample([5000, 2]), 8, "Binary")
#vf.plt_samples(samples.detach().numpy(), ax, title="$x\sim q(x)$")
plt.savefig("%s/flow_%d.pdf" % (folder + toy, epoch))
plt.savefig("%s/flow_%d.png" % (folder + toy, epoch))
plt.close(fig)
fig = plt.figure()
z = torch.distributions.Normal(0., 1.).sample(x_test.shape)
plt.figure(figsize=(7, 7))
plt.xlim(-4.5, 4.5)
plt.ylim(-4.5, 4.5)
plt.xlabel("$z_1$", fontsize=20)
plt.ylabel("$z_2$", fontsize=20)
plt.scatter(z[:, 0], z[:, 1], alpha=.2, color=green)
x_min = z.min(0)[0] - .5
x_max = z.max(0)[0] + .5
ticks = [1, 1]
plt.xticks([-4, 0, 4])
plt.yticks([-4, 0, 4])
#plt.grid(True)
ax = plt.gca()
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_facecolor(white_bg)
ax.tick_params(axis='x', colors=black)
ax.tick_params(axis='y', colors=black)
ax.spines['bottom'].set_color(black)
ax.spines['left'].set_color(black)
#plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
#plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
plt.tight_layout()
plt.savefig("noise.png", transparent=True)
z_pred = model.forward(x_test)
z_pred = z_pred.detach().cpu().numpy()
#plt.subplot(221)
plt.figure()
plt.title("z pred")
plt.scatter(z_pred[:, 0], z_pred[:, 1], alpha=.2)
plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
plt.savefig("test2.png")
start = timer()
z = torch.distributions.Normal(0., 1.).sample((10000, 2))
x_pred = model.invert(z, 5, "ParallelSimpler")
end = timer()
print("Inversion time: {:4f}s".format(end - start))
plt.subplot(223)
#plt.title("x pred")
x_pred = x_pred.detach().cpu().numpy()
plt.scatter(x_pred[:, 0], x_pred[:, 1], alpha=.2)
x_min = x.min(0)[0] - .5
x_max = x.max(0)[0] + .5
ticks = [1, 1]
plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
#plt.subplot(224)
plt.figure(figsize=(7, 7))
plt.xlim(-4.5, 4.5)
plt.ylim(-4.5, 4.5)
#cmap = matplotlib.cm.get_cmap(None)
#ax.set_facecolor(cmap(0.))
# ax.invert_yaxis()
plt.xlabel('$x_1$')
plt.ylabel('$x_2$')
plt.xticks([-4, 0, 4])
plt.yticks([-4, 0, 4])
plt.xlabel("$x_1$", fontsize=20)
plt.ylabel("$x_2$", fontsize=20)
plt.scatter(x[:, 0], x[:, 1], alpha=.2, color='#e15647')
#plt.xticks(np.arange(-5, 5.1, 2))
#plt.yticks(np.arange(-5, 5.1, 2))
#plt.grid(True)
ax = plt.gca()
# Hide the right and top spines
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_facecolor(white_bg)
ax.tick_params(axis='x', colors=black)
ax.tick_params(axis='y', colors=black)
ax.spines['bottom'].set_color(black)
ax.spines['left'].set_color(black)
#plt.xticks(np.arange(int(x_min[0]), int(x_max[0]), ticks[0]), np.arange(int(x_min[0]), int(x_max[0]), ticks[0]))
#plt.yticks(np.arange(int(x_min[1]), int(x_max[1]), ticks[1]), np.arange(int(x_min[1]), int(x_max[1]), ticks[1]))
plt.tight_layout()
plt.savefig("8gaussians.png", transparent=True)
plt.suptitle(str(("epoch: ", epoch, "Train loss: ", ll_tot.item(), "Test loss: ", ll_test.item())))
plt.savefig("%s/%d.png" % (folder + toy, epoch))
plt.close(fig)
def train_toy(toy, load=True, nb_steps=20, nb_flow=1, folder=""):
device = "cpu"
logger = utils.get_logger(logpath=os.path.join(folder, toy, 'logs'), filepath=os.path.abspath(__file__))
logger.info("Creating model...")
model = UMNNMAFFlow(nb_flow=nb_flow, nb_in=2, hidden_derivative=[100, 100, 100, 100], hidden_embedding=[100, 100, 100, 100],
embedding_s=10, nb_steps=nb_steps, device=device).to(device)
logger.info("Model created.")
opt = torch.optim.Adam(model.parameters(), 1e-3, weight_decay=1e-5)
if load:
logger.info("Loading model...")
model.load_state_dict(torch.load(folder + toy+'/model.pt'))
model.train()
opt.load_state_dict(torch.load(folder + toy+'/ADAM.pt'))
logger.info("Model loaded.")
nb_samp = 100
batch_size = 100
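    # With nb_samp == batch_size, the inner loop below draws exactly one fresh batch per epoch.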
x_test = torch.tensor(toy_data.inf_train_gen(toy, batch_size=1000)).to(device)
x = torch.tensor(toy_data.inf_train_gen(toy, batch_size=1000)).to(device)
for epoch in range(10000):
ll_tot = 0
start = timer()
for j in range(0, nb_samp, batch_size):
cur_x = torch.tensor(toy_data.inf_train_gen(toy, batch_size=batch_size)).to(device)
ll, z = model.compute_ll(cur_x)
ll = -ll.mean()
ll_tot += ll.detach()/(nb_samp/batch_size)
loss = ll
opt.zero_grad()
loss.backward()
opt.step()
end = timer()
ll_test, _ = model.compute_ll(x_test)
ll_test = -ll_test.mean()
logger.info("epoch: {:d} - Train loss: {:4f} - Test loss: {:4f} - Elapsed time per epoch {:4f} (seconds)".
format(epoch, ll_tot.item(), ll_test.item(), end-start))
if (epoch % 100) == 0:
summary_plots(x, x_test, folder, epoch, model, ll_tot, ll_test)
torch.save(model.state_dict(), folder + toy + '/model.pt')
torch.save(opt.state_dict(), folder + toy + '/ADAM.pt')
import argparse
datasets = ["8gaussians", "swissroll", "moons", "pinwheel", "cos", "2spirals", "checkerboard", "line", "line-noisy",
"circles", "joint_gaussian"]
parser = argparse.ArgumentParser(description='')
parser.add_argument("-dataset", default=None, choices=datasets, help="Which toy problem ?")
parser.add_argument("-load", default=False, action="store_true", help="Load a model ?")
parser.add_argument("-folder", default="", help="Folder")
args = parser.parse_args()
if args.dataset is None:
toys = datasets
else:
toys = [args.dataset]
for toy in toys:
if not(os.path.isdir(args.folder + toy)):
os.makedirs(args.folder + toy)
train_toy(toy, load=args.load, folder=args.folder)
| 7,250 | 37.775401 | 128 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/MonotonicMLP.py
|
import torch
import argparse
import torch.nn as nn
import matplotlib.pyplot as plt
from models.UMNN import MonotonicNN, IntegrandNN
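# Ground-truth target: strictly increasing in x_1 (df/dx_1 = .001 * (3 * x_1 ** 2 + 1) > 0)
# but unconstrained in x_2 and x_3 -- the setting MonotonicNN is designed for, being
# monotonic in its first input and free in the conditioning variables h.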
def f(x_1, x_2, x_3):
return .001*(x_1**3 + x_1) + x_2 ** 2 + torch.sin(x_3)
def create_dataset(n_samples):
x = torch.randn(n_samples, 3)
y = f(x[:, 0], x[:, 1], x[:, 2])
return x, y
class MLP(nn.Module):
def __init__(self, in_d, hidden_layers):
super(MLP, self).__init__()
self.net = []
hs = [in_d] + hidden_layers + [1]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net = nn.Sequential(*self.net)
def forward(self, x, h):
return self.net(torch.cat((x, h), 1))
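# Unconstrained MLP baseline: it receives the same (x, h) split as MonotonicNN, so the
# two models are trained on identical inputs; only the monotonicity constraint differs.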
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument("-nb_train", default=10000, type=int, help="Number of training samples")
parser.add_argument("-nb_test", default=1000, type=int, help="Number of testing samples")
parser.add_argument("-nb_epoch", default=200, type=int, help="Number of training epochs")
parser.add_argument("-load", default=False, action="store_true", help="Load a model ?")
parser.add_argument("-folder", default="", help="Folder")
args = parser.parse_args()
device = "cuda:0" if torch.cuda.is_available() else "cpu"
model_monotonic = MonotonicNN(3, [100, 100, 100], nb_steps=100, dev=device).to(device)
model_mlp = MLP(3, [200, 200, 200]).to(device)
optim_monotonic = torch.optim.Adam(model_monotonic.parameters(), 1e-3, weight_decay=1e-5)
optim_mlp = torch.optim.Adam(model_mlp.parameters(), 1e-3, weight_decay=1e-5)
train_x, train_y = create_dataset(args.nb_train)
test_x, test_y = create_dataset(args.nb_test)
b_size = 100
for epoch in range(0, args.nb_epoch):
# Shuffle
idx = torch.randperm(args.nb_train)
train_x = train_x[idx].to(device)
train_y = train_y[idx].to(device)
avg_loss_mon = 0.
avg_loss_mlp = 0.
for i in range(0, args.nb_train-b_size, b_size):
# Monotonic
x = train_x[i:i + b_size].requires_grad_()
y = train_y[i:i + b_size].requires_grad_()
y_pred = model_monotonic(x[:, [0]], x[:, 1:])[:, 0]
loss = ((y_pred - y)**2).sum()
optim_monotonic.zero_grad()
loss.backward()
optim_monotonic.step()
avg_loss_mon += loss.item()
# MLP
y_pred = model_mlp(x[:, [0]], x[:, 1:])[:, 0]
loss = ((y_pred - y) ** 2).sum()
optim_mlp.zero_grad()
loss.backward()
optim_mlp.step()
avg_loss_mlp += loss.item()
        print("Epoch {:d}".format(epoch))
print("\tMLP: ", avg_loss_mlp/args.nb_train)
print("\tMonotonic: ", avg_loss_mon / args.nb_train)
# <<TEST>>
x = torch.arange(-5, 5, .1).unsqueeze(1).to(device)
h = torch.zeros(x.shape[0], 2).to(device)
y = f(x[:, 0], h[:, 0], h[:, 1]).detach().cpu().numpy()
y_mon = model_monotonic(x, h)[:, 0].detach().cpu().numpy()
y_mlp = model_mlp(x, h)[:, 0].detach().cpu().numpy()
x = x.detach().cpu().numpy()
plt.plot(x, y_mon, label="Monotonic model")
plt.plot(x, y_mlp, label="MLP model")
plt.plot(x, y, label="groundtruth")
plt.legend()
    # Save before show(): closing the interactive window destroys the figure,
    # which would leave an empty Monotonicity.png.
    plt.savefig("Monotonicity.png")
    plt.show()
| 3,487 | 35.715789 | 96 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/UCIExperiments.py
|
from models import UMNNMAFFlow
import torch
import numpy as np
import os
import pickle
import lib.utils as utils
import datasets
from timeit import default_timer as timer
from tensorboardX import SummaryWriter
writer = SummaryWriter()
def batch_iter(X, batch_size, shuffle=False):
"""
X: feature tensor (shape: num_instances x num_features)
"""
if shuffle:
idxs = torch.randperm(X.shape[0])
else:
idxs = torch.arange(X.shape[0])
if X.is_cuda:
idxs = idxs.cuda()
for batch_idxs in idxs.split(batch_size):
yield X[batch_idxs]
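# Example usage (hypothetical shapes): for a 1000 x 6 feature tensor X,
#     for batch in batch_iter(X, batch_size=100, shuffle=True):
#         ...  # each batch is a (100, 6) slice; the last one may be smaller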
def load_data(name):
if name == 'bsds300':
return datasets.BSDS300()
elif name == 'power':
return datasets.POWER()
elif name == 'gas':
return datasets.GAS()
elif name == 'hepmass':
return datasets.HEPMASS()
elif name == 'miniboone':
return datasets.MINIBOONE()
else:
raise ValueError('Unknown dataset')
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def train_uci(dataset, load=None, test=False, save=None, nb_steps=50, solver="CC", hidden_embeding=[300, 300, 300, 300],
hidden_derivative=[100, 50, 50, 50, 50], embeding_size=30, nb_flow=5, lr=1e-3, weight_decay=1e-2,
nb_epoch=500, L=1., batch_size = 100, scheduler_rate=.99, scheduler_patience=500, optim="adam"):
cuda = 0 if torch.cuda.is_available() else -1
device = "cuda:0" if torch.cuda.is_available() else "cpu"
save_name = "ExperimentsResults/UCIExperiments/" + dataset + "/" + str(nb_steps) if save is None else save
logger = utils.get_logger(logpath=os.path.join(save_name, 'logs'), filepath=os.path.abspath(__file__), saving=save is not None)
logger.info("Loading data...")
data = load_data(dataset)
data.trn.x = torch.from_numpy(data.trn.x).to(device)
nb_in = data.trn.x.shape[1]
data.val.x = torch.from_numpy(data.val.x).to(device)
data.tst.x = torch.from_numpy(data.tst.x).to(device)
logger.info("Data loaded.")
logger.info("Creating model...")
model = UMNNMAFFlow(nb_flow=nb_flow, nb_in=nb_in, hidden_derivative=hidden_derivative,
hidden_embedding=hidden_embeding, embedding_s=embeding_size, nb_steps=nb_steps, device=device,
solver=solver).to(device)
logger.info("Model created.")
if save is not None:
with open(save + "/model.txt", "w") as f:
f.write(str(model))
if optim == "adam":
opt = torch.optim.Adam(model.parameters(), lr, weight_decay=weight_decay)
elif optim == "sgd":
opt = torch.optim.SGD(model.parameters(), lr, weight_decay=weight_decay, momentum=.9)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, factor=scheduler_rate, patience=scheduler_patience,
threshold=1e-2)
random_steps = nb_steps <= 0
if load is not None:
logger.info("Loading model...")
if cuda >= 0:
model.load_state_dict(torch.load(load + '/model_best_train.pt'))
else:
model.load_state_dict(torch.load(load + '/model_best_train.pt', map_location='cpu'))
logger.info("Model loaded.")
if test:
model.eval()
with torch.no_grad():
# Compute Test loss
i = 0
ll_test = 0.
if random_steps:
model.set_steps_nb(100)
for cur_x in batch_iter(data.tst.x, shuffle=True, batch_size=batch_size):
ll_tmp, z = model.compute_ll(cur_x)
i += 1
ll_test -= ll_tmp.mean()
logger.info("Test loss: {:4f}".format(ll_test.detach().cpu().data/i))
ll_test /= i
logger.info("Number of parameters: {:d} - Test loss: {:4f}".format(len(_flatten(model.parameters())),
ll_test.detach().cpu().data))
with open(load + '/losses.pkl', 'rb') as f:
losses_train, losses_test = pickle.load(f)
cur_epoch = len(losses_test)
else:
losses_train = []
losses_test = []
cur_epoch = 0
best_valid = np.inf
best_train = np.inf
for epoch in range(cur_epoch, cur_epoch + nb_epoch):
ll_tot = 0
i = 0
start = timer()
for cur_x in batch_iter(data.trn.x, shuffle=True, batch_size=batch_size):
if random_steps:
nb_steps = np.random.randint(5, 50)*2
model.set_steps_nb(nb_steps)
opt.zero_grad()
            # Useful to split the batch into smaller sub-batches
max_forward = batch_size
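            # Gradient accumulation: each sub-batch loss is divided by batch_size / max_forward,
            # so the summed gradients match a single full-batch mean; with
            # max_forward == batch_size the inner loop runs exactly once.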
for cur_su_batch in range(0, batch_size, max_forward):
ll, z = model.compute_ll(cur_x.view(-1, nb_in)[cur_su_batch:cur_su_batch+max_forward])
ll = -ll.mean()/(batch_size/max_forward)
ll.backward()
ll_tot += ll.detach()
torch.nn.utils.clip_grad.clip_grad_value_(model.parameters(), 1.)
opt.step()
if L > 0:
                model.forceLipshitz(L)
i += 1
if i % 100 == 0:
time_tot = timer()
logger.info("{:d} cur_loss {:4f} - Average time elapsed per batch {:4f}".format(i, ll_tot / i, (time_tot-start)/i))
if save:
torch.save(model.state_dict(), save_name + '/model.pt')
ll_tot /= i
time_tot = timer()
losses_train.append(ll_tot.detach().cpu())
with torch.no_grad():
# Compute Test loss
i = 0
ll_val = 0.
for cur_x in batch_iter(data.val.x, shuffle=True, batch_size=batch_size):
                ll_tmp, _ = model.compute_ll(cur_x.view(-1, nb_in).to(device))
i += 1
ll_val -= ll_tmp.mean()
ll_val /= i
losses_test.append(ll_val.detach().cpu())
writer.add_scalars('data/' + save_name + "/losses", {"Valid": ll_val.detach().cpu().item(),
"Train": ll_tot.detach().cpu().item()}, epoch)
scheduler.step(ll_val)
if ll_val.detach().cpu().item() < best_valid:
best_valid = ll_val.detach().cpu().item()
            torch.save(model.state_dict(), save_name + '/model_best_valid.pt')
            if ll_tot.detach().cpu().item() < best_train:
                torch.save(model.state_dict(), save_name + '/model_best_train_valid.pt')
        if ll_tot.detach().cpu().item() < best_train:
            best_train = ll_tot.detach().cpu().item()
            torch.save(model.state_dict(), save_name + '/model_best_train.pt')
# Save losses
if save:
if epoch % 5 == 0:
if not (os.path.isdir(save_name + '/models')):
os.makedirs(save_name + '/models')
torch.save(model.state_dict(), save_name + '/models/model_{:04d}.pt'.format(epoch))
with open(save_name + '/losses.pkl', 'wb') as f: # Python 3: open(..., 'wb')
pickle.dump([losses_train, losses_test], f)
logger.info("epoch: {:d} - Train loss: {:4f} - Valid loss: {:4f} - Time elapsed per epoch {:4f}".format(
epoch, ll_tot.detach().cpu().item(), ll_val.detach().cpu().item(), time_tot-start))
import argparse
parser = argparse.ArgumentParser(description='')
parser.add_argument("-load", default=None, help="where to load")
parser.add_argument("-test", default=False, action="store_true", help="Only test")
parser.add_argument("-save", default=None, help="where to store results")
parser.add_argument("-steps", default=50, type=int, help="number of integration steps")
parser.add_argument("-solver", choices=["CC", "CCParallel"], default="CC", help="Solver to use")
parser.add_argument("-hidden_embedding", nargs='+', type=int, default=[512, 512], help="Nb neurons for emebding")
parser.add_argument("-hidden_derivative", nargs='+', type=int, default=[50, 50, 50, 50], help="Nb neurons for derivative")
parser.add_argument("-embedding_size", type=int, default=30, help="Size of embedding part")
parser.add_argument("-nb_flow", type=int, default=5, help="Number of nets in the flow")
parser.add_argument("-weight_decay", type=float, default=1e-2, help="Weight Decay")
parser.add_argument("-lr", type=float, default=1e-3, help="Learning rate")
parser.add_argument("-s_rate", type=float, default=.5, help="LR Scheduling rate")
parser.add_argument("-nb_epoch", type=int, default=500, help="Number of epoch")
parser.add_argument("-b_size", type=int, default=500, help="Number of samples per batch")
parser.add_argument("-s_patience", type=int, default=5, help="Number of epoch with no improvement for lr scheduling")
parser.add_argument(
'--data', choices=['power', 'gas', 'hepmass', 'miniboone', 'bsds300'], type=str, default='miniboone'
)
parser.add_argument("-Lipshitz", type=float, default=0, help="Lipshitz constant max of linear layer in derivative net")
parser.add_argument("-Optim", choices=["adamBNAF", "sgd", "adam"], type=str, default="adam", help="Optimizer")
args = parser.parse_args()
dataset = args.data
dir_save = None if args.save is None else dataset + "/" + args.save
dir_load = None if args.load is None else dataset + "/" + args.load
if dir_save is not None:
if not (os.path.isdir(dir_save)):
os.makedirs(dir_save)
with open(dir_save + "/args.txt", "w") as f:
f.write(str(args))
train_uci(dataset=dataset, load=dir_load, test=args.test, save=dir_save, nb_steps=args.steps, solver=args.solver,
hidden_embeding=args.hidden_embedding, hidden_derivative=args.hidden_derivative, nb_flow=args.nb_flow,
weight_decay=args.weight_decay, lr=args.lr, nb_epoch=args.nb_epoch, L=args.Lipshitz, batch_size=args.b_size,
scheduler_patience=args.s_patience, scheduler_rate=args.s_rate, optim=args.Optim,
embeding_size=args.embedding_size)
| 10,219 | 41.941176 | 131 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/TrainVaeFlow.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import time
import torch
import torch.utils.data
import torch.optim as optim
import numpy as np
import math
import random
import os
import datetime
import lib.utils as utils
from models.vae_lib.models import VAE
from models.vae_lib.optimization.training import train, evaluate
from models.vae_lib.utils.load_data import load_dataset
from models.vae_lib.utils.plotting import plot_training_curve
from tensorboardX import SummaryWriter
writer = SummaryWriter()
SOLVERS = ["CC", "CCParallel", "Simpson"]
parser = argparse.ArgumentParser(description='PyTorch VAE Normalizing flows')
parser.add_argument(
'-d', '--dataset', type=str, default='mnist', choices=['mnist', 'freyfaces', 'omniglot', 'caltech'],
metavar='DATASET', help='Dataset choice.'
)
parser.add_argument(
'-freys', '--freyseed', type=int, default=123, metavar='FREYSEED',
help="""Seed for shuffling frey face dataset for test split. Ignored for other datasets.
Results in paper are produced with seeds 123, 321, 231"""
)
parser.add_argument('-nc', '--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--manual_seed', type=int, help='manual seed, if not given resorts to random seed.')
parser.add_argument(
'-li', '--log_interval', type=int, default=10, metavar='LOG_INTERVAL',
help='how many batches to wait before logging training status'
)
parser.add_argument(
'-od', '--out_dir', type=str, default='snapshots', metavar='OUT_DIR',
help='output directory for model snapshots etc.'
)
# optimization settings
parser.add_argument(
'-e', '--epochs', type=int, default=2000, metavar='EPOCHS', help='number of epochs to train (default: 2000)'
)
parser.add_argument(
'-es', '--early_stopping_epochs', type=int, default=35, metavar='EARLY_STOPPING',
help='number of early stopping epochs'
)
parser.add_argument(
'-bs', '--batch_size', type=int, default=100, metavar='BATCH_SIZE', help='input batch size for training'
)
parser.add_argument('-lr', '--learning_rate', type=float, default=0.0005, metavar='LEARNING_RATE', help='learning rate')
parser.add_argument(
'-w', '--warmup', type=int, default=100, metavar='N',
help='number of epochs for warm-up. Set to 0 to turn warmup off.'
)
parser.add_argument('--max_beta', type=float, default=1., metavar='MB', help='max beta for warm-up')
parser.add_argument('--min_beta', type=float, default=0.0, metavar='MB', help='min beta for warm-up')
parser.add_argument(
'-f', '--flow', type=str, default='no_flow', choices=[
'planar', 'iaf', 'householder', 'orthogonal', 'triangular', 'MMAF', 'no_flow'
], help="""Type of flows to use, no flows can also be selected"""
)
parser.add_argument('-r', '--rank', type=int, default=1)
parser.add_argument(
'-nf', '--num_flows', type=int, default=4, metavar='NUM_FLOWS',
help='Number of flow layers, ignored in absence of flows'
)
parser.add_argument(
'-nv', '--num_ortho_vecs', type=int, default=8, metavar='NUM_ORTHO_VECS',
help=""" For orthogonal flow: How orthogonal vectors per flow do you need.
Ignored for other flow types."""
)
parser.add_argument(
'-nh', '--num_householder', type=int, default=8, metavar='NUM_HOUSEHOLDERS',
help=""" For Householder Sylvester flow: Number of Householder matrices per flow.
Ignored for other flow types."""
)
parser.add_argument(
'-mhs', '--made_h_size', type=int, default=320, metavar='MADEHSIZE',
help='Width of mades for iaf and MMAF. Ignored for all other flows.'
)
parser.add_argument('--z_size', type=int, default=64, metavar='ZSIZE', help='how many stochastic hidden units')
# gpu/cpu
parser.add_argument('--gpu_num', type=int, default=0, metavar='GPU', help='choose GPU to run on.')
# MMAF settings
parser.add_argument("-steps", default=50, type=int, help="number of integration steps")
parser.add_argument("-solver", default="CC", help="Solver used")
parser.add_argument("-hidden_embedding", nargs='+', type=int, default=[512, 512], help="Nb neurons for emebding")
parser.add_argument("-hidden_derivative", nargs='+', type=int, default=[50, 50, 50, 50], help="Nb neurons for derivative")
parser.add_argument("-embedding_size", type=int, default=30, help="Size of embedding part")
parser.add_argument("-Lipshitz", type=float, default=0, help="Lipshitz constant max of linear layer in derivative net")
# evaluation
parser.add_argument('--evaluate', type=eval, default=False, choices=[True, False])
parser.add_argument('--model_path', type=str, default='')
parser.add_argument('--retrain_encoder', type=eval, default=False, choices=[True, False])
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.manual_seed is None:
args.manual_seed = random.randint(1, 100000)
random.seed(args.manual_seed)
torch.manual_seed(args.manual_seed)
np.random.seed(args.manual_seed)
if args.cuda:
# gpu device number
torch.cuda.set_device(args.gpu_num)
kwargs = {'num_workers': 0, 'pin_memory': True} if args.cuda else {}
def run(args, kwargs):
# ==================================================================================================================
# SNAPSHOTS
# ==================================================================================================================
args.model_signature = str(datetime.datetime.now())[0:19].replace(' ', '_')
args.model_signature = args.model_signature.replace(':', '_')
snapshots_path = os.path.join(args.out_dir, 'vae_' + args.dataset + '_')
snap_dir = snapshots_path + args.flow
if args.flow != 'no_flow':
snap_dir += '_' + 'num_flows_' + str(args.num_flows)
if args.flow == 'orthogonal':
snap_dir = snap_dir + '_num_vectors_' + str(args.num_ortho_vecs)
elif args.flow == 'orthogonalH':
snap_dir = snap_dir + '_num_householder_' + str(args.num_householder)
elif args.flow == 'iaf':
snap_dir = snap_dir + '_madehsize_' + str(args.made_h_size)
elif args.flow == "MMAF":
snap_dir = snap_dir + 'MMAF'
elif args.flow == 'permutation':
snap_dir = snap_dir + '_' + 'kernelsize_' + str(args.kernel_size)
elif args.flow == 'mixed':
snap_dir = snap_dir + '_' + 'num_householder_' + str(args.num_householder)
if args.retrain_encoder:
snap_dir = snap_dir + '_retrain-encoder_'
elif args.evaluate:
snap_dir = snap_dir + '_evaluate_'
snap_dir = snap_dir + '__' + args.model_signature + '/'
args.snap_dir = snap_dir
if not os.path.exists(snap_dir):
os.makedirs(snap_dir)
# logger
utils.makedirs(args.snap_dir)
logger = utils.get_logger(logpath=os.path.join(args.snap_dir, 'logs'), filepath=os.path.abspath(__file__))
logger.info(args)
# SAVING
torch.save(args, snap_dir + args.flow + '.config')
# ==================================================================================================================
# LOAD DATA
# ==================================================================================================================
train_loader, val_loader, test_loader, args = load_dataset(args, **kwargs)
if not args.evaluate:
# ==============================================================================================================
# SELECT MODEL
# ==============================================================================================================
# flow parameters and architecture choice are passed on to model through args
if args.flow == 'no_flow':
model = VAE.VAE(args)
elif args.flow == 'planar':
model = VAE.PlanarVAE(args)
elif args.flow == 'iaf':
model = VAE.IAFVAE(args)
elif args.flow == 'orthogonal':
model = VAE.OrthogonalSylvesterVAE(args)
elif args.flow == 'householder':
model = VAE.HouseholderSylvesterVAE(args)
elif args.flow == 'triangular':
model = VAE.TriangularSylvesterVAE(args)
elif args.flow == 'MMAF':
model = VAE.MMAVAE(args)
else:
raise ValueError('Invalid flow choice')
if args.retrain_encoder:
logger.info(f"Initializing decoder from {args.model_path}")
dec_model = torch.load(args.model_path)
dec_sd = {}
for k, v in dec_model.state_dict().items():
if 'p_x' in k:
dec_sd[k] = v
model.load_state_dict(dec_sd, strict=False)
if args.cuda:
logger.info("Model on GPU")
model.cuda()
logger.info(model)
if args.retrain_encoder:
parameters = []
logger.info('Optimizing over:')
for name, param in model.named_parameters():
if 'p_x' not in name:
logger.info(name)
parameters.append(param)
else:
parameters = model.parameters()
optimizer = optim.Adamax(parameters, lr=args.learning_rate, eps=1.e-7)
# ==================================================================================================================
# TRAINING
# ==================================================================================================================
train_loss = []
val_loss = []
# for early stopping
best_loss = np.inf
best_bpd = np.inf
e = 0
epoch = 0
train_times = []
for epoch in range(1, args.epochs + 1):
t_start = time.time()
tr_loss = train(epoch, train_loader, model, optimizer, args, logger)
train_loss.append(tr_loss)
train_times.append(time.time() - t_start)
logger.info('One training epoch took %.2f seconds' % (time.time() - t_start))
v_loss, v_bpd = evaluate(val_loader, model, args, logger, epoch=epoch)
val_loss.append(v_loss)
writer.add_scalars('data/' + args.snap_dir + "/losses", {"Valid": v_loss,
"Train": tr_loss.sum() / len(train_loader)}, epoch)
# early-stopping
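            # Reset the patience counter on any validation improvement; once past the warm-up
            # epochs, stop after early_stopping_epochs consecutive epochs without improvement.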
if v_loss < best_loss:
e = 0
best_loss = v_loss
if args.input_type != 'binary':
best_bpd = v_bpd
logger.info('->model saved<-')
torch.save(model, snap_dir + args.flow + '.model')
# torch.save(model, snap_dir + args.flow + '_' + args.architecture + '.model')
elif (args.early_stopping_epochs > 0) and (epoch >= args.warmup):
e += 1
if e > args.early_stopping_epochs:
break
if args.input_type == 'binary':
logger.info(
'--> Early stopping: {}/{} (BEST: loss {:.4f})\n'.format(e, args.early_stopping_epochs, best_loss)
)
else:
logger.info(
'--> Early stopping: {}/{} (BEST: loss {:.4f}, bpd {:.4f})\n'.
format(e, args.early_stopping_epochs, best_loss, best_bpd)
)
if math.isnan(v_loss):
raise ValueError('NaN encountered!')
train_loss = np.hstack(train_loss)
val_loss = np.array(val_loss)
plot_training_curve(train_loss, val_loss, fname=snap_dir + '/training_curve_%s.pdf' % args.flow)
# training time per epoch
train_times = np.array(train_times)
mean_train_time = np.mean(train_times)
std_train_time = np.std(train_times, ddof=1)
logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))
# ==================================================================================================================
# EVALUATION
# ==================================================================================================================
logger.info(args)
logger.info('Stopped after %d epochs' % epoch)
logger.info('Average train time per epoch: %.2f +/- %.2f' % (mean_train_time, std_train_time))
final_model = torch.load(snap_dir + args.flow + '.model')
validation_loss, validation_bpd = evaluate(val_loader, final_model, args, logger)
else:
validation_loss = "N/A"
validation_bpd = "N/A"
logger.info(f"Loading model from {args.model_path}")
final_model = torch.load(args.model_path)
test_loss, test_bpd = evaluate(test_loader, final_model, args, logger, testing=False)
    # '{}' instead of '{:.4f}' so the "N/A" placeholders set in evaluate mode cannot crash formatting
    logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL): {}'.format(validation_loss))
    logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST): {}'.format(test_loss))
    if args.input_type != 'binary':
        logger.info('FINAL EVALUATION ON VALIDATION SET. ELBO (VAL) BPD : {}'.format(validation_bpd))
        logger.info('FINAL EVALUATION ON TEST SET. NLL (TEST) BPD: {}'.format(test_bpd))
if __name__ == "__main__":
run(args, kwargs)
| 13,750 | 39.444118 | 124 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/setup.py
|
from setuptools import setup
setup(
name='UMNN',
version='0.1',
packages=['UMNN'],
url='',
license='MIT License',
author='awehenkel',
author_email='[email protected]',
description=''
)
| 227 | 16.538462 | 46 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/__init__.py
|
from .UMNN import UMNNMAFFlow, MADE, ParallelNeuralIntegral, NeuralIntegral
| 76 | 37.5 | 75 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/__init__.py
| 0 | 0 | 0 |
py
|
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/VAE.py
|
from __future__ import print_function
import torch
import torch.nn as nn
from ...vae_lib.models import flows
from ...vae_lib.models.layers import GatedConv2d, GatedConvTranspose2d
class VAE(nn.Module):
"""
The base VAE class containing gated convolutional encoder and decoder architecture.
Can be used as a base class for VAE's with normalizing flows.
"""
def __init__(self, args):
super(VAE, self).__init__()
# extract model settings from args
self.z_size = args.z_size
self.input_size = args.input_size
self.input_type = args.input_type
if self.input_size == [1, 28, 28] or self.input_size == [3, 28, 28]:
self.last_kernel_size = 7
elif self.input_size == [1, 28, 20]:
self.last_kernel_size = (7, 5)
else:
raise ValueError('invalid input size!!')
self.q_z_nn, self.q_z_mean, self.q_z_var = self.create_encoder()
self.p_x_nn, self.p_x_mean = self.create_decoder()
self.q_z_nn_output_dim = 256
# auxiliary
if args.cuda:
self.FloatTensor = torch.cuda.FloatTensor
else:
self.FloatTensor = torch.FloatTensor
# log-det-jacobian = 0 without flows
self.log_det_j = self.FloatTensor(1).zero_()
def create_encoder(self):
"""
Helper function to create the elemental blocks for the encoder. Creates a gated convnet encoder.
the encoder expects data as input of shape (batch_size, num_channels, width, height).
"""
if self.input_type == 'binary':
q_z_nn = nn.Sequential(
GatedConv2d(self.input_size[0], 32, 5, 1, 2),
GatedConv2d(32, 32, 5, 2, 2),
GatedConv2d(32, 64, 5, 1, 2),
GatedConv2d(64, 64, 5, 2, 2),
GatedConv2d(64, 64, 5, 1, 2),
GatedConv2d(64, 256, self.last_kernel_size, 1, 0),
)
q_z_mean = nn.Linear(256, self.z_size)
q_z_var = nn.Sequential(
nn.Linear(256, self.z_size),
nn.Softplus(),
)
return q_z_nn, q_z_mean, q_z_var
elif self.input_type == 'multinomial':
act = None
q_z_nn = nn.Sequential(
GatedConv2d(self.input_size[0], 32, 5, 1, 2, activation=act),
GatedConv2d(32, 32, 5, 2, 2, activation=act),
GatedConv2d(32, 64, 5, 1, 2, activation=act),
GatedConv2d(64, 64, 5, 2, 2, activation=act),
GatedConv2d(64, 64, 5, 1, 2, activation=act),
GatedConv2d(64, 256, self.last_kernel_size, 1, 0, activation=act)
)
q_z_mean = nn.Linear(256, self.z_size)
q_z_var = nn.Sequential(nn.Linear(256, self.z_size), nn.Softplus(), nn.Hardtanh(min_val=0.01, max_val=7.))
return q_z_nn, q_z_mean, q_z_var
def create_decoder(self):
"""
Helper function to create the elemental blocks for the decoder. Creates a gated convnet decoder.
"""
num_classes = 256
if self.input_type == 'binary':
p_x_nn = nn.Sequential(
GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0),
GatedConvTranspose2d(64, 64, 5, 1, 2),
GatedConvTranspose2d(64, 32, 5, 2, 2, 1),
GatedConvTranspose2d(32, 32, 5, 1, 2),
GatedConvTranspose2d(32, 32, 5, 2, 2, 1), GatedConvTranspose2d(32, 32, 5, 1, 2)
)
p_x_mean = nn.Sequential(nn.Conv2d(32, self.input_size[0], 1, 1, 0), nn.Sigmoid())
return p_x_nn, p_x_mean
elif self.input_type == 'multinomial':
act = None
p_x_nn = nn.Sequential(
GatedConvTranspose2d(self.z_size, 64, self.last_kernel_size, 1, 0, activation=act),
GatedConvTranspose2d(64, 64, 5, 1, 2, activation=act),
GatedConvTranspose2d(64, 32, 5, 2, 2, 1, activation=act),
GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act),
GatedConvTranspose2d(32, 32, 5, 2, 2, 1, activation=act),
GatedConvTranspose2d(32, 32, 5, 1, 2, activation=act)
)
p_x_mean = nn.Sequential(
nn.Conv2d(32, 256, 5, 1, 2),
nn.Conv2d(256, self.input_size[0] * num_classes, 1, 1, 0),
# output shape: batch_size, num_channels * num_classes, pixel_width, pixel_height
)
return p_x_nn, p_x_mean
else:
raise ValueError('invalid input type!!')
def reparameterize(self, mu, var):
"""
Samples z from a multivariate Gaussian with diagonal covariance matrix using the
reparameterization trick.
"""
std = var.sqrt()
eps = self.FloatTensor(std.size()).normal_()
z = eps.mul(std).add_(mu)
return z
def encode(self, x):
"""
Encoder expects following data shapes as input: shape = (batch_size, num_channels, width, height)
"""
h = self.q_z_nn(x)
h = h.view(h.size(0), -1)
mean = self.q_z_mean(h)
var = self.q_z_var(h)
return mean, var
def decode(self, z):
"""
Decoder outputs reconstructed image in the following shapes:
x_mean.shape = (batch_size, num_channels, width, height)
"""
z = z.view(z.size(0), self.z_size, 1, 1)
h = self.p_x_nn(z)
x_mean = self.p_x_mean(h)
return x_mean
def forward(self, x):
"""
Evaluates the model as a whole, encodes and decodes. Note that the log det jacobian is zero
for a plain VAE (without flows), and z_0 = z_k.
"""
# mean and variance of z
z_mu, z_var = self.encode(x)
# sample z
z = self.reparameterize(z_mu, z_var)
x_mean = self.decode(z)
return x_mean, z_mu, z_var, self.log_det_j, z, z
class PlanarVAE(VAE):
"""
Variational auto-encoder with planar flows in the encoder.
"""
def __init__(self, args):
super(PlanarVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Planar
self.num_flows = args.num_flows
# Amortized flow parameters
self.amor_u = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
self.amor_w = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow()
self.add_module('flow_' + str(k), flow_k)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# return amortized u an w for all flows
u = self.amor_u(h).view(batch_size, self.num_flows, self.z_size, 1)
w = self.amor_w(h).view(batch_size, self.num_flows, 1, self.z_size)
b = self.amor_b(h).view(batch_size, self.num_flows, 1, 1)
return mean_z, var_z, u, w, b
def forward(self, x):
"""
Forward pass with planar flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, u, w, b = self.encode(x)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
z_k, log_det_jacobian = flow_k(z[k], u[:, k, :, :], w[:, k, :, :], b[:, k, :, :])
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class OrthogonalSylvesterVAE(VAE):
"""
Variational auto-encoder with orthogonal flows in the encoder.
"""
def __init__(self, args):
super(OrthogonalSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Sylvester
self.num_flows = args.num_flows
self.num_ortho_vecs = args.num_ortho_vecs
assert (self.num_ortho_vecs <= self.z_size) and (self.num_ortho_vecs > 0)
# Orthogonalization parameters
if self.num_ortho_vecs == self.z_size:
self.cond = 1.e-5
else:
self.cond = 1.e-6
self.steps = 100
identity = torch.eye(self.num_ortho_vecs, self.num_ortho_vecs)
# Add batch dimension
identity = identity.unsqueeze(0)
# Put identity in buffer so that it will be moved to GPU if needed by any call of .cuda
self.register_buffer('_eye', identity)
self._eye.requires_grad = False
# Masks needed for triangular R1 and R2.
triu_mask = torch.triu(torch.ones(self.num_ortho_vecs, self.num_ortho_vecs), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.num_ortho_vecs).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of R1 * R2 have to satisfy -1 < R1 * R2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs * self.num_ortho_vecs)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs), self.diag_activation
)
self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_ortho_vecs)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.num_ortho_vecs)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.num_ortho_vecs)
self.add_module('flow_' + str(k), flow_k)
def batch_construct_orthogonal(self, q):
"""
Batch orthogonal matrix construction.
:param q: q contains batches of matrices, shape : (batch_size * num_flows, z_size * num_ortho_vecs)
:return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, num_ortho_vecs)
"""
# Reshape to shape (num_flows * batch_size, z_size * num_ortho_vecs)
q = q.view(-1, self.z_size * self.num_ortho_vecs)
norm = torch.norm(q, p=2, dim=1, keepdim=True)
amat = torch.div(q, norm)
dim0 = amat.size(0)
amat = amat.resize(dim0, self.z_size, self.num_ortho_vecs)
max_norm = 0.
# Iterative orthogonalization
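        # Each step applies A <- A (I + 0.5 (I - A^T A)), a Newton-Schulz iteration that
        # drives A towards orthonormality (A^T A = I) when the initial residual is small.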
for s in range(self.steps):
tmp = torch.bmm(amat.transpose(2, 1), amat)
tmp = self._eye - tmp
tmp = self._eye + 0.5 * tmp
amat = torch.bmm(amat, tmp)
# Testing for convergence
test = torch.bmm(amat.transpose(2, 1), amat) - self._eye
norms2 = torch.sum(torch.norm(test, p=2, dim=2)**2, dim=1)
norms = torch.sqrt(norms2)
max_norm = torch.max(norms).item()
if max_norm <= self.cond:
break
if max_norm > self.cond:
print('\nWARNING WARNING WARNING: orthogonalization not complete')
print('\t Final max norm =', max_norm)
print()
# Reshaping: first dimension is batch_size
amat = amat.view(-1, self.num_flows, self.z_size, self.num_ortho_vecs)
amat = amat.transpose(0, 1)
return amat
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, q, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
full_d = full_d.resize(batch_size, self.num_ortho_vecs, self.num_ortho_vecs, self.num_flows)
diag1 = diag1.resize(batch_size, self.num_ortho_vecs, self.num_flows)
diag2 = diag2.resize(batch_size, self.num_ortho_vecs, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
q = self.amor_q(h)
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
b = b.resize(batch_size, 1, self.num_ortho_vecs, self.num_flows)
return mean_z, var_z, r1, r2, q, b
def forward(self, x):
"""
Forward pass with orthogonal sylvester flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, q, b = self.encode(x)
# Orthogonalize all q matrices
q_ortho = self.batch_construct_orthogonal(q)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_ortho[k, :, :, :], b[:, :, :, k])
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class HouseholderSylvesterVAE(VAE):
"""
Variational auto-encoder with householder sylvester flows in the encoder.
"""
def __init__(self, args):
super(HouseholderSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.Sylvester
self.num_flows = args.num_flows
self.num_householder = args.num_householder
assert self.num_householder > 0
identity = torch.eye(self.z_size, self.z_size)
# Add batch dimension
identity = identity.unsqueeze(0)
# Put identity in buffer so that it will be moved to GPU if needed by any call of .cuda
self.register_buffer('_eye', identity)
self._eye.requires_grad = False
# Masks needed for triangular r1 and r2.
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.z_size).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_q = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.num_householder)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.z_size)
self.add_module('flow_' + str(k), flow_k)
def batch_construct_orthogonal(self, q):
"""
Batch orthogonal matrix construction.
:param q: q contains batches of matrices, shape : (batch_size, num_flows * z_size * num_householder)
:return: batches of orthogonalized matrices, shape: (batch_size * num_flows, z_size, z_size)
"""
# Reshape to shape (num_flows * batch_size * num_householder, z_size)
q = q.view(-1, self.z_size)
norm = torch.norm(q, p=2, dim=1, keepdim=True) # ||v||_2
v = torch.div(q, norm) # v / ||v||_2
# Calculate Householder Matrices
vvT = torch.bmm(v.unsqueeze(2), v.unsqueeze(1)) # v * v_T : batch_dot( B x L x 1 * B x 1 x L ) = B x L x L
amat = self._eye - 2 * vvT # NOTICE: v is already normalized! so there is no need to calculate vvT/vTv
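        # A single reflection I - 2 v v^T is orthogonal, and the bmm loop below composes
        # num_householder of them into a general orthogonal matrix per flow.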
# Reshaping: first dimension is batch_size * num_flows
amat = amat.view(-1, self.num_householder, self.z_size, self.z_size)
tmp = amat[:, 0]
for k in range(1, self.num_householder):
tmp = torch.bmm(amat[:, k], tmp)
amat = tmp.view(-1, self.num_flows, self.z_size, self.z_size)
amat = amat.transpose(0, 1)
return amat
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, q, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
full_d = full_d.resize(batch_size, self.z_size, self.z_size, self.num_flows)
diag1 = diag1.resize(batch_size, self.z_size, self.num_flows)
diag2 = diag2.resize(batch_size, self.z_size, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
q = self.amor_q(h)
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
b = b.resize(batch_size, 1, self.z_size, self.num_flows)
return mean_z, var_z, r1, r2, q, b
def forward(self, x):
"""
Forward pass with orthogonal flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, q, b = self.encode(x)
# Orthogonalize all q matrices
q_ortho = self.batch_construct_orthogonal(q)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
q_k = q_ortho[k]
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], q_k, b[:, :, :, k], sum_ldj=True)
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
class TriangularSylvesterVAE(VAE):
"""
    Variational auto-encoder with triangular Sylvester flows in the encoder. Alternates between setting
    the orthogonal matrix equal to a permutation matrix and the identity matrix for each flow.
"""
def __init__(self, args):
super(TriangularSylvesterVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
# Flow parameters
flow = flows.TriangularSylvester
self.num_flows = args.num_flows
# permuting indices corresponding to Q=P (permutation matrix) for every other flow
flip_idx = torch.arange(self.z_size - 1, -1, -1).long()
self.register_buffer('flip_idx', flip_idx)
# Masks needed for triangular r1 and r2.
triu_mask = torch.triu(torch.ones(self.z_size, self.z_size), diagonal=1)
triu_mask = triu_mask.unsqueeze(0).unsqueeze(3)
diag_idx = torch.arange(0, self.z_size).long()
self.register_buffer('triu_mask', triu_mask)
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
# Amortized flow parameters
# Diagonal elements of r1 * r2 have to satisfy -1 < r1 * r2 for flow to be invertible
self.diag_activation = nn.Tanh()
self.amor_d = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size * self.z_size)
self.amor_diag1 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_diag2 = nn.Sequential(
nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size), self.diag_activation
)
self.amor_b = nn.Linear(self.q_z_nn_output_dim, self.num_flows * self.z_size)
# Normalizing flow layers
for k in range(self.num_flows):
flow_k = flow(self.z_size)
self.add_module('flow_' + str(k), flow_k)
def encode(self, x):
"""
        Encoder that outputs parameters for base distribution of z and flow parameters.
"""
batch_size = x.size(0)
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
# Amortized r1, r2, b for all flows
full_d = self.amor_d(h)
diag1 = self.amor_diag1(h)
diag2 = self.amor_diag2(h)
full_d = full_d.resize(batch_size, self.z_size, self.z_size, self.num_flows)
diag1 = diag1.resize(batch_size, self.z_size, self.num_flows)
diag2 = diag2.resize(batch_size, self.z_size, self.num_flows)
r1 = full_d * self.triu_mask
r2 = full_d.transpose(2, 1) * self.triu_mask
r1[:, self.diag_idx, self.diag_idx, :] = diag1
r2[:, self.diag_idx, self.diag_idx, :] = diag2
b = self.amor_b(h)
# Resize flow parameters to divide over K flows
b = b.resize(batch_size, 1, self.z_size, self.num_flows)
return mean_z, var_z, r1, r2, b
def forward(self, x):
"""
Forward pass with orthogonal flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
self.log_det_j = 0.
z_mu, z_var, r1, r2, b = self.encode(x)
# Sample z_0
z = [self.reparameterize(z_mu, z_var)]
# Normalizing flows
for k in range(self.num_flows):
flow_k = getattr(self, 'flow_' + str(k))
if k % 2 == 1:
                # Alternate with reordering z for the triangular flow
permute_z = self.flip_idx
else:
permute_z = None
z_k, log_det_jacobian = flow_k(z[k], r1[:, :, :, k], r2[:, :, :, k], b[:, :, :, k], permute_z, sum_ldj=True)
z.append(z_k)
self.log_det_j += log_det_jacobian
x_mean = self.decode(z[-1])
return x_mean, z_mu, z_var, self.log_det_j, z[0], z[-1]
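# Editor's sketch (not part of the original source; hypothetical z_size = 4):
# flip_idx is tensor([3, 2, 1, 0]), so every odd-numbered flow sees the latent
# coordinates in reversed order before the triangular transformation:
#
#     z = torch.tensor([[0.1, 0.2, 0.3, 0.4]])
#     z[:, torch.arange(3, -1, -1)]    # tensor([[0.4, 0.3, 0.2, 0.1]])
#
# Alternating Q = I with Q = P lets the stacked triangular flows couple every
# pair of latent dimensions.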
class IAFVAE(VAE):
"""
Variational auto-encoder with inverse autoregressive flows in the encoder.
"""
def __init__(self, args):
super(IAFVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
self.h_size = args.made_h_size
self.h_context = nn.Linear(self.q_z_nn_output_dim, self.h_size)
# Flow parameters
self.num_flows = args.num_flows
self.flow = flows.IAF(
z_size=self.z_size, num_flows=self.num_flows, num_hidden=1, h_size=self.h_size, conv2d=False
)
def encode(self, x):
"""
        Encoder that outputs parameters for the base distribution of z and the context h for the flows.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
h_context = self.h_context(h)
return mean_z, var_z, h_context
def forward(self, x):
"""
Forward pass with inverse autoregressive flows for the transformation z_0 -> z_1 -> ... -> z_k.
Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
# mean and variance of z
z_mu, z_var, h_context = self.encode(x)
# sample z
z_0 = self.reparameterize(z_mu, z_var)
# iaf flows
z_k, self.log_det_j = self.flow(z_0, h_context)
# decode
x_mean = self.decode(z_k)
return x_mean, z_mu, z_var, self.log_det_j, z_0, z_k
class MMAVAE(VAE):
"""
Variational auto-encoder with Monotonic Masked autoregressive flows in the encoder.
"""
def __init__(self, args):
super(MMAVAE, self).__init__(args)
# Initialize log-det-jacobian to zero
self.log_det_j = 0.
self.h_size = args.made_h_size
self.h_context = nn.Linear(self.q_z_nn_output_dim, self.h_size)
# Flow parameters
self.num_flows = args.num_flows
self.device = "cuda:%d" % args.gpu_num if torch.cuda.is_available() else "cpu"
self.flow = flows.MMAF(
z_size=self.z_size, num_flows=self.num_flows, num_hidden=1, h_size=self.h_size, device=self.device, args=args#, conv2d=False
)
def encode(self, x):
"""
        Encoder that outputs parameters for the base distribution of z and the context h for the flows.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
h_context = self.h_context(h)
return mean_z, var_z, h_context
def forward(self, x):
"""
Forward pass with Monotonic Masked autoregressive flows for the transformation z_0 -> z_1 -> ... -> z_k.
        Log determinant is computed as log_det_j = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ].
"""
# mean and variance of z
z_mu, z_var, h_context = self.encode(x)
# sample z
z_0 = self.reparameterize(z_mu, z_var)
        # mmaf flows
z_k, self.log_det_j = self.flow(z_0, h_context)
# decode
x_mean = self.decode(z_k)
return x_mean, z_mu, z_var, self.log_det_j, z_0, z_k
def forceLipshitz(self, L=1.5):
self.flow.forceLipshitz(L)
| 26,921 | 32.949559 | 136 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/CNFVAE.py
|
import torch
import torch.nn as nn
from train_misc import build_model_tabular
from UMNNMAF import lib as layers
import lib as diffeq_layers
from .VAE import VAE
from lib import NONLINEARITIES
from torchdiffeq import odeint_adjoint as odeint
def get_hidden_dims(args):
return tuple(map(int, args.dims.split("-"))) + (args.z_size,)
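# Example (editor's note, hypothetical args): with args.dims = "512-512" and
# args.z_size = 64, get_hidden_dims(args) returns (512, 512, 64) -- the hidden
# widths followed by the latent dimension as the final output width.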
def concat_layer_num_params(in_dim, out_dim):
return (in_dim + 1) * out_dim + out_dim
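# Worked example (editor's note): a "concat" layer maps the time-augmented
# input (t, x) of width in_dim + 1 to out_dim, so with in_dim = 64 and
# out_dim = 128 it holds (64 + 1) * 128 + 128 = 8448 parameters (weights plus
# biases).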
class CNFVAE(VAE):
def __init__(self, args):
super(CNFVAE, self).__init__(args)
# CNF model
self.cnf = build_model_tabular(args, args.z_size)
if args.cuda:
self.cuda()
def encode(self, x):
"""
        Encoder that outputs parameters for the base distribution of z and the flow parameters.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
return mean_z, var_z
def forward(self, x):
"""
        Forward pass with the continuous normalizing flow for the transformation z_0 -> z(T).
        The log determinant is accumulated by the CNF and returned as -delta_logp.
"""
z_mu, z_var = self.encode(x)
# Sample z_0
z0 = self.reparameterize(z_mu, z_var)
zero = torch.zeros(x.shape[0], 1).to(x)
zk, delta_logp = self.cnf(z0, zero) # run model forward
x_mean = self.decode(zk)
return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, zk
class AmortizedBiasODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(AmortizedBiasODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
for dim_out in hidden_dims:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns[:-1])
def _unpack_params(self, params):
return [params]
def forward(self, t, y, am_biases):
dx = y
for l, layer in enumerate(self.layers):
dx = layer(t, dx)
this_bias, am_biases = am_biases[:, :dx.size(1)], am_biases[:, dx.size(1):]
dx = dx + this_bias
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
return dx
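# Editor's sketch (hypothetical sizes, not in the original source): with
# hidden_dims = (64, 64, 20), the amortized am_biases tensor packs one bias
# vector per layer into shape (batch, 64 + 64 + 20); each loop iteration above
# peels off the first dx.size(1) columns as the current layer's bias, e.g.
#
#     this_bias, am_biases = am_biases[:, :64], am_biases[:, 64:]   # layer 0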
class AmortizedLowRankODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, rank=1, layer_type="concat", nonlinearity="softplus"):
super(AmortizedLowRankODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
for dim_out in hidden_dims:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns[:-1])
self.rank = rank
def _unpack_params(self, params):
return [params]
def _rank_k_bmm(self, x, u, v):
xu = torch.bmm(x[:, None], u.view(x.shape[0], x.shape[-1], self.rank))
xuv = torch.bmm(xu, v.view(x.shape[0], self.rank, -1))
return xuv[:, 0]
def forward(self, t, y, am_params):
dx = y
for l, (layer, in_dim, out_dim) in enumerate(zip(self.layers, self.input_dims, self.output_dims)):
this_u, am_params = am_params[:, :in_dim * self.rank], am_params[:, in_dim * self.rank:]
this_v, am_params = am_params[:, :out_dim * self.rank], am_params[:, out_dim * self.rank:]
this_bias, am_params = am_params[:, :out_dim], am_params[:, out_dim:]
xw = layer(t, dx)
xw_am = self._rank_k_bmm(dx, this_u, this_v)
dx = xw + xw_am + this_bias
# if not last layer, use nonlinearity
if l < len(self.layers) - 1:
dx = self.activation_fns[l](dx)
return dx
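# Editor's note (hypothetical sizes): the amortized update computes
# x @ (U @ V) with per-sample factors U: (in_dim, rank) and V: (rank, out_dim),
# so a 64 -> 64 layer with rank = 1 needs only 64 + 64 + 64 = 192 amortized
# numbers (u, v and bias) per sample instead of a dense 64 x 64 weight matrix.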
class HyperODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(HyperODEnet, self).__init__()
assert layer_type == "concat"
self.input_dim = input_dim
# build layers and add them
activation_fns = []
for dim_out in hidden_dims + (input_dim,):
activation_fns.append(NONLINEARITIES[nonlinearity])
self.activation_fns = nn.ModuleList(activation_fns[:-1])
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
def _pack_inputs(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return ttx
def _unpack_params(self, params):
layer_params = []
for in_dim, out_dim in zip(self.input_dims, self.output_dims):
this_num_params = concat_layer_num_params(in_dim, out_dim)
# get params for this layer
this_params, params = params[:, :this_num_params], params[:, this_num_params:]
# split into weight and bias
bias, weight_params = this_params[:, :out_dim], this_params[:, out_dim:]
weight = weight_params.view(weight_params.size(0), in_dim + 1, out_dim)
layer_params.append(weight)
layer_params.append(bias)
return layer_params
def _layer(self, t, x, weight, bias):
# weights is (batch, in_dim + 1, out_dim)
ttx = self._pack_inputs(t, x) # (batch, in_dim + 1)
ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1)
xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim)
return xw + bias
def forward(self, t, y, *layer_params):
dx = y
for l, (weight, bias) in enumerate(zip(layer_params[::2], layer_params[1::2])):
dx = self._layer(t, dx, weight, bias)
# if not last layer, use nonlinearity
if l < len(layer_params) - 1:
dx = self.activation_fns[l](dx)
return dx
class LyperODEnet(nn.Module):
def __init__(self, hidden_dims, input_dim, layer_type="concat", nonlinearity="softplus"):
super(LyperODEnet, self).__init__()
base_layer = {
"ignore": diffeq_layers.IgnoreLinear,
"hyper": diffeq_layers.HyperLinear,
"squash": diffeq_layers.SquashLinear,
"concat": diffeq_layers.ConcatLinear,
"concat_v2": diffeq_layers.ConcatLinear_v2,
"concatsquash": diffeq_layers.ConcatSquashLinear,
"blend": diffeq_layers.BlendLinear,
"concatcoord": diffeq_layers.ConcatLinear,
}[layer_type]
self.input_dim = input_dim
# build layers and add them
layers = []
activation_fns = []
hidden_shape = input_dim
self.dims = (input_dim,) + hidden_dims
self.output_dims = hidden_dims
self.input_dims = (input_dim,) + hidden_dims[:-1]
for dim_out in hidden_dims[:-1]:
layer = base_layer(hidden_shape, dim_out)
layers.append(layer)
activation_fns.append(NONLINEARITIES[nonlinearity])
hidden_shape = dim_out
self.layers = nn.ModuleList(layers)
self.activation_fns = nn.ModuleList(activation_fns)
def _pack_inputs(self, t, x):
tt = torch.ones_like(x[:, :1]) * t
ttx = torch.cat([tt, x], 1)
return ttx
def _unpack_params(self, params):
return [params]
def _am_layer(self, t, x, weight, bias):
# weights is (batch, in_dim + 1, out_dim)
ttx = self._pack_inputs(t, x) # (batch, in_dim + 1)
ttx = ttx.view(ttx.size(0), 1, ttx.size(1)) # (batch, 1, in_dim + 1)
xw = torch.bmm(ttx, weight)[:, 0, :] # (batch, out_dim)
return xw + bias
def forward(self, t, x, am_params):
dx = x
for layer, act in zip(self.layers, self.activation_fns):
dx = act(layer(t, dx))
bias, weight_params = am_params[:, :self.dims[-1]], am_params[:, self.dims[-1]:]
weight = weight_params.view(weight_params.size(0), self.dims[-2] + 1, self.dims[-1])
dx = self._am_layer(t, dx, weight, bias)
return dx
def construct_amortized_odefunc(args, z_dim, amortization_type="bias"):
hidden_dims = get_hidden_dims(args)
if amortization_type == "bias":
diffeq = AmortizedBiasODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "hyper":
diffeq = HyperODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "lyper":
diffeq = LyperODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
elif amortization_type == "low_rank":
diffeq = AmortizedLowRankODEnet(
hidden_dims=hidden_dims,
input_dim=z_dim,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
rank=args.rank,
        )
    else:
        raise ValueError('Unknown amortization_type: %s' % amortization_type)
odefunc = layers.ODEfunc(
diffeq=diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
return odefunc
class AmortizedCNFVAE(VAE):
h_size = 256
def __init__(self, args):
super(AmortizedCNFVAE, self).__init__(args)
# CNF model
self.odefuncs = nn.ModuleList([
construct_amortized_odefunc(args, args.z_size, self.amortization_type) for _ in range(args.num_blocks)
])
self.q_am = self._amortized_layers(args)
assert len(self.q_am) == args.num_blocks or len(self.q_am) == 0
if args.cuda:
self.cuda()
self.register_buffer('integration_times', torch.tensor([0.0, args.time_length]))
self.atol = args.atol
self.rtol = args.rtol
self.solver = args.solver
def encode(self, x):
"""
        Encoder that outputs parameters for the base distribution of z and the flow parameters.
"""
h = self.q_z_nn(x)
h = h.view(-1, self.q_z_nn_output_dim)
mean_z = self.q_z_mean(h)
var_z = self.q_z_var(h)
am_params = [q_am(h) for q_am in self.q_am]
return mean_z, var_z, am_params
def forward(self, x):
self.log_det_j = 0.
z_mu, z_var, am_params = self.encode(x)
# Sample z_0
z0 = self.reparameterize(z_mu, z_var)
delta_logp = torch.zeros(x.shape[0], 1).to(x)
z = z0
for odefunc, am_param in zip(self.odefuncs, am_params):
am_param_unpacked = odefunc.diffeq._unpack_params(am_param)
odefunc.before_odeint()
states = odeint(
odefunc,
(z, delta_logp) + tuple(am_param_unpacked),
self.integration_times.to(z),
atol=self.atol,
rtol=self.rtol,
method=self.solver,
)
z, delta_logp = states[0][-1], states[1][-1]
x_mean = self.decode(z)
return x_mean, z_mu, z_var, -delta_logp.view(-1), z0, z
class AmortizedBiasCNFVAE(AmortizedCNFVAE):
amortization_type = "bias"
def _amortized_layers(self, args):
hidden_dims = get_hidden_dims(args)
bias_size = sum(hidden_dims)
return nn.ModuleList([nn.Linear(self.h_size, bias_size) for _ in range(args.num_blocks)])
class AmortizedLowRankCNFVAE(AmortizedCNFVAE):
amortization_type = "low_rank"
def _amortized_layers(self, args):
out_dims = get_hidden_dims(args)
in_dims = (out_dims[-1],) + out_dims[:-1]
params_size = (sum(in_dims) + sum(out_dims)) * args.rank + sum(out_dims)
return nn.ModuleList([nn.Linear(self.h_size, params_size) for _ in range(args.num_blocks)])
class HypernetCNFVAE(AmortizedCNFVAE):
amortization_type = "hyper"
def _amortized_layers(self, args):
hidden_dims = get_hidden_dims(args)
input_dims = (args.z_size,) + hidden_dims[:-1]
assert args.layer_type == "concat", "hypernets only support concat layers at the moment"
weight_dims = [concat_layer_num_params(in_dim, out_dim) for in_dim, out_dim in zip(input_dims, hidden_dims)]
weight_size = sum(weight_dims)
return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)])
class LypernetCNFVAE(AmortizedCNFVAE):
amortization_type = "lyper"
def _amortized_layers(self, args):
dims = (args.z_size,) + get_hidden_dims(args)
weight_size = concat_layer_num_params(dims[-2], dims[-1])
return nn.ModuleList([nn.Linear(self.h_size, weight_size) for _ in range(args.num_blocks)])
| 14,375 | 33.808717 | 116 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/layers.py
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import numpy as np
import torch.nn.functional as F
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
class GatedConv2d(nn.Module):
def __init__(self, input_channels, output_channels, kernel_size, stride, padding, dilation=1, activation=None):
super(GatedConv2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)
self.g = nn.Conv2d(input_channels, output_channels, kernel_size, stride, padding, dilation)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class GatedConvTranspose2d(nn.Module):
def __init__(
self, input_channels, output_channels, kernel_size, stride, padding, output_padding=0, dilation=1,
activation=None
):
super(GatedConvTranspose2d, self).__init__()
self.activation = activation
self.sigmoid = nn.Sigmoid()
self.h = nn.ConvTranspose2d(
input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation
)
self.g = nn.ConvTranspose2d(
input_channels, output_channels, kernel_size, stride, padding, output_padding, dilation=dilation
)
def forward(self, x):
if self.activation is None:
h = self.h(x)
else:
h = self.activation(self.h(x))
g = self.sigmoid(self.g(x))
return h * g
class MaskedLinear(nn.Module):
"""
Creates masked linear layer for MLP MADE.
For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
For hidden to output (y) layers:
If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
"""
def __init__(self, in_features, out_features, diagonal_zeros=False, bias=True):
super(MaskedLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
mask = torch.from_numpy(self.build_mask())
if torch.cuda.is_available():
mask = mask.cuda()
self.mask = torch.autograd.Variable(mask, requires_grad=False)
self.reset_parameters()
def reset_parameters(self):
        nn.init.kaiming_normal_(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_in % n_out == 0 or n_out % n_in == 0
mask = np.ones((n_in, n_out), dtype=np.float32)
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i + 1:, i * k:(i + 1) * k] = 0
if self.diagonal_zeros:
mask[i:i + 1, i * k:(i + 1) * k] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[(i + 1) * k:, i:i + 1] = 0
if self.diagonal_zeros:
mask[i * k:(i + 1) * k:, i:i + 1] = 0
return mask
def forward(self, x):
output = x.mm(self.mask * self.weight)
if self.bias is not None:
return output.add(self.bias.expand_as(output))
else:
return output
def __repr__(self):
if self.bias is not None:
bias = True
else:
bias = False
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ', diagonal_zeros=' \
+ str(self.diagonal_zeros) + ', bias=' \
+ str(bias) + ')'
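# Example (editor's note): MaskedLinear(2, 4).build_mask() with
# diagonal_zeros=False yields
#
#     [[1., 1., 1., 1.],
#      [0., 0., 1., 1.]]
#
# so output group 0 (columns 0-1) depends only on x_0 while output group 1
# (columns 2-3) depends on x_0 and x_1, i.e. y_i = f(x_{<=i}).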
class MaskedConv2d(nn.Module):
"""
Creates masked convolutional autoregressive layer for pixelCNN.
For input (x) to hidden (h) or hidden to hidden layers choose diagonal_zeros = False.
For hidden to output (y) layers:
If output depends on input through y_i = f(x_{<i}) set diagonal_zeros = True.
Else if output depends on input through y_i = f(x_{<=i}) set diagonal_zeros = False.
"""
def __init__(self, in_features, out_features, size_kernel=(3, 3), diagonal_zeros=False, bias=True):
super(MaskedConv2d, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.size_kernel = size_kernel
self.diagonal_zeros = diagonal_zeros
self.weight = Parameter(torch.FloatTensor(out_features, in_features, *self.size_kernel))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter('bias', None)
mask = torch.from_numpy(self.build_mask())
if torch.cuda.is_available():
mask = mask.cuda()
self.mask = torch.autograd.Variable(mask, requires_grad=False)
self.reset_parameters()
def reset_parameters(self):
        nn.init.kaiming_normal_(self.weight)
if self.bias is not None:
self.bias.data.zero_()
def build_mask(self):
n_in, n_out = self.in_features, self.out_features
assert n_out % n_in == 0 or n_in % n_out == 0, "%d - %d" % (n_in, n_out)
# Build autoregressive mask
l = (self.size_kernel[0] - 1) // 2
m = (self.size_kernel[1] - 1) // 2
mask = np.ones((n_out, n_in, self.size_kernel[0], self.size_kernel[1]), dtype=np.float32)
mask[:, :, :l, :] = 0
mask[:, :, l, :m] = 0
if n_out >= n_in:
k = n_out // n_in
for i in range(n_in):
mask[i * k:(i + 1) * k, i + 1:, l, m] = 0
if self.diagonal_zeros:
mask[i * k:(i + 1) * k, i:i + 1, l, m] = 0
else:
k = n_in // n_out
for i in range(n_out):
mask[i:i + 1, (i + 1) * k:, l, m] = 0
if self.diagonal_zeros:
mask[i:i + 1, i * k:(i + 1) * k:, l, m] = 0
return mask
def forward(self, x):
output = F.conv2d(x, self.mask * self.weight, bias=self.bias, padding=(1, 1))
return output
def __repr__(self):
if self.bias is not None:
bias = True
else:
bias = False
return self.__class__.__name__ + ' (' \
+ str(self.in_features) + ' -> ' \
+ str(self.out_features) + ', diagonal_zeros=' \
+ str(self.diagonal_zeros) + ', bias=' \
+ str(bias) + ', size_kernel=' \
+ str(self.size_kernel) + ')'
| 7,128 | 32.947619 | 115 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/__init__.py
| 0 | 0 | 0 |
py
|
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/models/flows.py
|
"""
Collection of flow strategies
"""
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
from ...vae_lib.models.layers import MaskedConv2d, MaskedLinear
import sys
sys.path.append("../../")
from models import UMNNMAFFlow
class Planar(nn.Module):
"""
PyTorch implementation of planar flows as presented in "Variational Inference with Normalizing Flows"
by Danilo Jimenez Rezende, Shakir Mohamed. Model assumes amortized flow parameters.
"""
def __init__(self):
super(Planar, self).__init__()
self.h = nn.Tanh()
self.softplus = nn.Softplus()
def der_h(self, x):
""" Derivative of tanh """
return 1 - self.h(x)**2
def forward(self, zk, u, w, b):
"""
Forward pass. Assumes amortized u, w and b. Conditions on diagonals of u and w for invertibility
        will be satisfied inside this function. Computes the following transformation:
z' = z + u h( w^T z + b)
or actually
z'^T = z^T + h(z^T w + b)u^T
Assumes the following input shapes:
shape u = (batch_size, z_size, 1)
shape w = (batch_size, 1, z_size)
shape b = (batch_size, 1, 1)
shape z = (batch_size, z_size).
"""
zk = zk.unsqueeze(2)
# reparameterize u such that the flow becomes invertible (see appendix paper)
uw = torch.bmm(w, u)
m_uw = -1. + self.softplus(uw)
w_norm_sq = torch.sum(w**2, dim=2, keepdim=True)
u_hat = u + ((m_uw - uw) * w.transpose(2, 1) / w_norm_sq)
# compute flow with u_hat
wzb = torch.bmm(w, zk) + b
z = zk + u_hat * self.h(wzb)
z = z.squeeze(2)
# compute logdetJ
psi = w * self.der_h(wzb)
log_det_jacobian = torch.log(torch.abs(1 + torch.bmm(psi, u_hat)))
log_det_jacobian = log_det_jacobian.squeeze(2).squeeze(1)
return z, log_det_jacobian
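# Usage sketch (editor's addition, assuming the shapes documented above):
#
#     flow = Planar()
#     zk = torch.randn(4, 2)            # (batch_size, z_size)
#     u = torch.randn(4, 2, 1)          # (batch_size, z_size, 1)
#     w = torch.randn(4, 1, 2)          # (batch_size, 1, z_size)
#     b = torch.randn(4, 1, 1)          # (batch_size, 1, 1)
#     z_new, ldj = flow(zk, u, w, b)    # z_new: (4, 2), ldj: (4,)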
class Sylvester(nn.Module):
"""
Sylvester normalizing flow.
"""
def __init__(self, num_ortho_vecs):
super(Sylvester, self).__init__()
self.num_ortho_vecs = num_ortho_vecs
self.h = nn.Tanh()
triu_mask = torch.triu(torch.ones(num_ortho_vecs, num_ortho_vecs), diagonal=1).unsqueeze(0)
diag_idx = torch.arange(0, num_ortho_vecs).long()
self.register_buffer('triu_mask', Variable(triu_mask))
self.triu_mask.requires_grad = False
self.register_buffer('diag_idx', diag_idx)
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x)**2
def _forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
"""
All flow parameters are amortized. Conditions on diagonals of R1 and R2 for invertibility need to be satisfied
outside of this function. Computes the following transformation:
z' = z + QR1 h( R2Q^T z + b)
or actually
z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
:param zk: shape: (batch_size, z_size)
:param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
:param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs)
:param q_ortho: shape (batch_size, z_size , num_ortho_vecs)
        :param b: shape: (batch_size, 1, num_ortho_vecs)
:return: z, log_det_j
"""
# Amortized flow parameters
zk = zk.unsqueeze(1)
# Save diagonals for log_det_j
diag_r1 = r1[:, self.diag_idx, self.diag_idx]
diag_r2 = r2[:, self.diag_idx, self.diag_idx]
r1_hat = r1
r2_hat = r2
qr2 = torch.bmm(q_ortho, r2_hat.transpose(2, 1))
qr1 = torch.bmm(q_ortho, r1_hat)
r2qzb = torch.bmm(zk, qr2) + b
z = torch.bmm(self.h(r2qzb), qr1.transpose(2, 1)) + zk
z = z.squeeze(1)
# Compute log|det J|
# Output log_det_j in shape (batch_size) instead of (batch_size,1)
diag_j = diag_r1 * diag_r2
diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
diag_j += 1.
log_diag_j = diag_j.abs().log()
if sum_ldj:
log_det_j = log_diag_j.sum(-1)
else:
log_det_j = log_diag_j
return z, log_det_j
def forward(self, zk, r1, r2, q_ortho, b, sum_ldj=True):
return self._forward(zk, r1, r2, q_ortho, b, sum_ldj)
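# Usage sketch (editor's addition, shapes only -- the invertibility conditions
# on the diagonals of r1 and r2 are the caller's responsibility, and
# torch.linalg.qr requires a recent PyTorch):
#
#     flow = Sylvester(num_ortho_vecs=2)
#     zk = torch.randn(8, 6)                        # (batch, z_size)
#     r1 = torch.randn(8, 2, 2).triu()              # upper triangular
#     r2 = torch.randn(8, 2, 2).triu()
#     q, _ = torch.linalg.qr(torch.randn(8, 6, 2))  # orthonormal columns
#     b = torch.randn(8, 1, 2)
#     z, ldj = flow(zk, r1, r2, q, b)               # z: (8, 6), ldj: (8,)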
class TriangularSylvester(nn.Module):
"""
Sylvester normalizing flow with Q=P or Q=I.
"""
def __init__(self, z_size):
super(TriangularSylvester, self).__init__()
self.z_size = z_size
self.h = nn.Tanh()
diag_idx = torch.arange(0, z_size).long()
self.register_buffer('diag_idx', diag_idx)
def der_h(self, x):
return self.der_tanh(x)
def der_tanh(self, x):
return 1 - self.h(x)**2
def _forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True):
"""
        All flow parameters are amortized. Conditions on the diagonals of R1 and R2 need to be satisfied
outside of this function.
Computes the following transformation:
z' = z + QR1 h( R2Q^T z + b)
or actually
z'^T = z^T + h(z^T Q R2^T + b^T)R1^T Q^T
with Q = P a permutation matrix (equal to identity matrix if permute_z=None)
:param zk: shape: (batch_size, z_size)
:param r1: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
:param r2: shape: (batch_size, num_ortho_vecs, num_ortho_vecs).
:param b: shape: (batch_size, 1, self.z_size)
:return: z, log_det_j
"""
# Amortized flow parameters
zk = zk.unsqueeze(1)
# Save diagonals for log_det_j
diag_r1 = r1[:, self.diag_idx, self.diag_idx]
diag_r2 = r2[:, self.diag_idx, self.diag_idx]
if permute_z is not None:
# permute order of z
z_per = zk[:, :, permute_z]
else:
z_per = zk
r2qzb = torch.bmm(z_per, r2.transpose(2, 1)) + b
z = torch.bmm(self.h(r2qzb), r1.transpose(2, 1))
if permute_z is not None:
            # permute z back to its original order
z = z[:, :, permute_z]
z += zk
z = z.squeeze(1)
# Compute log|det J|
# Output log_det_j in shape (batch_size) instead of (batch_size,1)
diag_j = diag_r1 * diag_r2
diag_j = self.der_h(r2qzb).squeeze(1) * diag_j
diag_j += 1.
log_diag_j = diag_j.abs().log()
if sum_ldj:
log_det_j = log_diag_j.sum(-1)
else:
log_det_j = log_diag_j
return z, log_det_j
    def forward(self, zk, r1, r2, b, permute_z=None, sum_ldj=True):
        return self._forward(zk, r1, r2, b, permute_z, sum_ldj)
class IAF(nn.Module):
"""
PyTorch implementation of inverse autoregressive flows as presented in
"Improving Variational Inference with Inverse Autoregressive Flow" by Diederik P. Kingma, Tim Salimans,
Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling.
Inverse Autoregressive Flow with either MADE MLPs or Pixel CNNs. Contains several flows. Each transformation
takes as an input the previous stochastic z, and a context h. The structure of each flow is then as follows:
z <- autoregressive_layer(z) + h, allow for diagonal connections
z <- autoregressive_layer(z), allow for diagonal connections
:
z <- autoregressive_layer(z), do not allow for diagonal connections.
Note that the size of h needs to be the same as h_size, which is the width of the MADE layers.
"""
def __init__(self, z_size, num_flows=2, num_hidden=0, h_size=50, forget_bias=1., conv2d=False):
super(IAF, self).__init__()
self.z_size = z_size
self.num_flows = num_flows
self.num_hidden = num_hidden
self.h_size = h_size
self.conv2d = conv2d
if not conv2d:
ar_layer = MaskedLinear
else:
ar_layer = MaskedConv2d
self.activation = torch.nn.ELU
# self.activation = torch.nn.ReLU
self.forget_bias = forget_bias
self.flows = []
self.param_list = []
# For reordering z after each flow
flip_idx = torch.arange(self.z_size - 1, -1, -1).long()
self.register_buffer('flip_idx', flip_idx)
for k in range(num_flows):
arch_z = [ar_layer(z_size, h_size), self.activation()]
self.param_list += list(arch_z[0].parameters())
z_feats = torch.nn.Sequential(*arch_z)
arch_zh = []
for j in range(num_hidden):
arch_zh += [ar_layer(h_size, h_size), self.activation()]
self.param_list += list(arch_zh[-2].parameters())
zh_feats = torch.nn.Sequential(*arch_zh)
linear_mean = ar_layer(h_size, z_size, diagonal_zeros=True)
linear_std = ar_layer(h_size, z_size, diagonal_zeros=True)
self.param_list += list(linear_mean.parameters())
self.param_list += list(linear_std.parameters())
if torch.cuda.is_available():
z_feats = z_feats.cuda()
zh_feats = zh_feats.cuda()
linear_mean = linear_mean.cuda()
linear_std = linear_std.cuda()
self.flows.append((z_feats, zh_feats, linear_mean, linear_std))
self.param_list = torch.nn.ParameterList(self.param_list)
def forward(self, z, h_context):
logdets = 0.
for i, flow in enumerate(self.flows):
if (i + 1) % 2 == 0 and not self.conv2d:
# reverse ordering to help mixing
z = z[:, self.flip_idx]
h = flow[0](z)
h = h + h_context
h = flow[1](h)
mean = flow[2](h)
            gate = torch.sigmoid(flow[3](h) + self.forget_bias)
z = gate * z + (1 - gate) * mean
logdets += torch.sum(gate.log().view(gate.size(0), -1), 1)
return z, logdets
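# Usage sketch (editor's addition; assumes a CPU-only run, since the masked
# layers are moved to CUDA when available, and h_size must be a multiple of
# z_size for the masks to build):
#
#     iaf = IAF(z_size=4, num_flows=2, num_hidden=1, h_size=8)
#     z0 = torch.randn(16, 4)
#     h = torch.randn(16, 8)            # context, width h_size
#     zk, logdets = iaf(z0, h)          # zk: (16, 4), logdets: (16,)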
class MMAF(nn.Module):
def __init__(self, z_size, num_flows=2, num_hidden=0, h_size=50, device='cpu', args=None):
super(MMAF, self).__init__()
self.model = UMNNMAFFlow(nb_flow=num_flows, nb_in=z_size, hidden_derivative=args.hidden_derivative,
hidden_embedding=args.hidden_embedding, embedding_s=args.embedding_size,
nb_steps=args.steps, solver=args.solver, cond_in=h_size, device=device)
self.z_size = z_size
self.num_flows = num_flows
self.num_hidden = num_hidden
self.h_size = h_size
self.steps = args.steps
def forward(self, z, h_context):
if self.steps == 0:
nb_steps = np.random.randint(10, 50) * 2
self.model.set_steps_nb(nb_steps)
return self.model.compute_log_jac_bis(z, h_context)
def forceLipshitz(self, L=1.5):
if L > 0:
self.model.forceLipshitz(L)
| 10,990 | 32.306061 | 118 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/optimization/loss.py
|
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
from ...vae_lib.utils.distributions import log_normal_diag, log_normal_standard, log_bernoulli
import torch.nn.functional as F
def binary_loss_function(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.):
"""
Computes the binary loss function while summing over batch dimension, not averaged!
:param recon_x: shape: (batch_size, num_channels, pixel_width, pixel_height), bernoulli parameters p(x=1)
:param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].
:param z_mu: mean of z_0
:param z_var: variance of z_0
:param z_0: first stochastic latent variable
:param z_k: last stochastic latent variable
:param ldj: log det jacobian
:param beta: beta for kl loss
:return: loss, ce, kl
"""
reconstruction_function = nn.BCELoss(size_average=False)
batch_size = x.size(0)
# - N E_q0 [ ln p(x|z_k) ]
bce = reconstruction_function(recon_x, x)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# N E_q0[ ln q(z_0) - ln p(z_k) ]
summed_logs = torch.sum(log_q_z0 - log_p_zk)
# sum over batches
summed_ldj = torch.sum(ldj)
# ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]
kl = (summed_logs - summed_ldj)
loss = bce + beta * (summed_logs - summed_ldj)
loss /= float(batch_size)
bce /= float(batch_size)
kl /= float(batch_size)
return loss, bce, kl
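# Editor's note: the returned loss is the summed negative ELBO averaged over
# the batch,
#
#     loss = [ BCE(recon_x, x) + beta * sum_i ( log q(z0_i) - log p(zk_i) - ldj_i ) ] / batch_size,
#
# so at beta = 1 it upper-bounds the average negative log-likelihood.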
def multinomial_loss_function(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
Computes the cross entropy loss function while summing over batch dimension, not averaged!
:param x_logit: shape: (batch_size, num_classes * num_channels, pixel_width, pixel_height), real valued logits
:param x: shape (batchsize, num_channels, pixel_width, pixel_height), pixel values rescaled between [0, 1].
:param z_mu: mean of z_0
:param z_var: variance of z_0
:param z_0: first stochastic latent variable
:param z_k: last stochastic latent variable
:param ldj: log det jacobian
:param args: global parameter settings
:param beta: beta for kl loss
:return: loss, ce, kl
"""
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
# make integer class labels
target = (x * (num_classes - 1)).long()
# - N E_q0 [ ln p(x|z_k) ]
# sums over batch dimension (and feature dimension)
ce = cross_entropy(x_logit, target, size_average=False)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# N E_q0[ ln q(z_0) - ln p(z_k) ]
summed_logs = torch.sum(log_q_z0 - log_p_zk)
# sum over batches
summed_ldj = torch.sum(ldj)
# ldj = N E_q_z0[\sum_k log |det dz_k/dz_k-1| ]
kl = (summed_logs - summed_ldj)
loss = ce + beta * (summed_logs - summed_ldj)
loss /= float(batch_size)
ce /= float(batch_size)
kl /= float(batch_size)
return loss, ce, kl
def binary_loss_array(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.):
"""
Computes the binary loss without averaging or summing over the batch dimension.
"""
batch_size = x.size(0)
# if not summed over batch_dimension
if len(ldj.size()) > 1:
ldj = ldj.view(ldj.size(0), -1).sum(-1)
# TODO: upgrade to newest pytorch version on master branch, there the nn.BCELoss comes with the option
# reduce, which when set to False, does no sum over batch dimension.
bce = -log_bernoulli(x.view(batch_size, -1), recon_x.view(batch_size, -1), dim=1)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k, dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
# ln q(z_0) - ln p(z_k) ]
logs = log_q_z0 - log_p_zk
loss = bce + beta * (logs - ldj)
return loss
def multinomial_loss_array(x_logit, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
    Computes the multinomial (cross-entropy) loss without averaging or summing over the batch dimension.
"""
num_classes = 256
batch_size = x.size(0)
x_logit = x_logit.view(batch_size, num_classes, args.input_size[0], args.input_size[1], args.input_size[2])
# make integer class labels
target = (x * (num_classes - 1)).long()
# - N E_q0 [ ln p(x|z_k) ]
# computes cross entropy over all dimensions separately:
ce = cross_entropy(x_logit, target, size_average=False, reduce=False)
# sum over feature dimension
ce = ce.view(batch_size, -1).sum(dim=1)
# ln p(z_k) (not averaged)
log_p_zk = log_normal_standard(z_k.view(batch_size, -1), dim=1)
# ln q(z_0) (not averaged)
log_q_z0 = log_normal_diag(
z_0.view(batch_size, -1), mean=z_mu.view(batch_size, -1), log_var=z_var.log().view(batch_size, -1), dim=1
)
# ln q(z_0) - ln p(z_k) ]
logs = log_q_z0 - log_p_zk
loss = ce + beta * (logs - ldj)
return loss
def cross_entropy(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
r"""
Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes
instead of only (N, C, d_1, d_2) or (N, C).
This criterion combines `log_softmax` and `nll_loss` in a single
function.
See :class:`~torch.nn.CrossEntropyLoss` for details.
Args:
input: Variable :math:`(N, C)` where `C = number of classes`
target: Variable :math:`(N)` where each value is
`0 <= targets[i] <= C-1`
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. However, if the field
sizeAverage is set to False, the losses are instead summed
for each minibatch. Ignored if reduce is False. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When size_average is
True, the loss is averaged over non-ignored targets. Default: -100
reduce (bool, optional): By default, the losses are averaged or summed over
observations for each minibatch depending on size_average. When reduce
is False, returns a loss per batch element instead and ignores
size_average. Default: ``True``
"""
return nll_loss(F.log_softmax(input, 1), target, weight, size_average, ignore_index, reduce)
def nll_loss(input, target, weight=None, size_average=True, ignore_index=-100, reduce=True):
r"""
Taken from the master branch of pytorch, accepts (N, C, d_1, d_2, ..., d_K) input shapes
instead of only (N, C, d_1, d_2) or (N, C).
The negative log likelihood loss.
See :class:`~torch.nn.NLLLoss` for details.
Args:
input: :math:`(N, C)` where `C = number of classes` or :math:`(N, C, H, W)`
in case of 2D Loss, or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K > 1`
in the case of K-dimensional loss.
target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`,
or :math:`(N, C, d_1, d_2, ..., d_K)` where :math:`K >= 1` for
K-dimensional loss.
weight (Tensor, optional): a manual rescaling weight given to each
class. If given, has to be a Tensor of size `C`
size_average (bool, optional): By default, the losses are averaged
over observations for each minibatch. If size_average
is False, the losses are summed for each minibatch. Default: ``True``
ignore_index (int, optional): Specifies a target value that is ignored
and does not contribute to the input gradient. When size_average is
True, the loss is averaged over non-ignored targets. Default: -100
"""
dim = input.dim()
    if dim == 2 or dim == 4:
        return F.nll_loss(
            input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
        )
elif dim == 3 or dim > 4:
n = input.size(0)
c = input.size(1)
out_size = (n,) + input.size()[2:]
if target.size()[1:] != input.size()[2:]:
raise ValueError('Expected target size {}, got {}'.format(out_size, input.size()))
input = input.contiguous().view(n, c, 1, -1)
target = target.contiguous().view(n, 1, -1)
if reduce:
            _loss = nn.NLLLoss(weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce)
return _loss(input, target)
out = F.nll_loss(
input, target, weight=weight, size_average=size_average, ignore_index=ignore_index, reduce=reduce
)
return out.view(out_size)
else:
raise ValueError('Expected 2 or more dimensions (got {})'.format(dim))
def calculate_loss(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=1.):
"""
Picks the correct loss depending on the input type.
"""
if args.input_type == 'binary':
loss, rec, kl = binary_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, beta=beta)
bpd = 0.
elif args.input_type == 'multinomial':
loss, rec, kl = multinomial_loss_function(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args, beta=beta)
bpd = loss.data.item() / (np.prod(args.input_size) * np.log(2.))
else:
raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type)
return loss, rec, kl, bpd
def calculate_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args):
"""
Picks the correct loss depending on the input type.
"""
if args.input_type == 'binary':
loss = binary_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj)
elif args.input_type == 'multinomial':
loss = multinomial_loss_array(x_mean, x, z_mu, z_var, z_0, z_k, ldj, args)
else:
raise ValueError('Invalid input type for calculate loss: %s.' % args.input_type)
return loss
| 10,621 | 38.051471 | 116 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/optimization/training.py
|
from __future__ import print_function
import time
import torch
from ...vae_lib.optimization.loss import calculate_loss
from ...vae_lib.utils.visual_evaluation import plot_reconstructions
from ...vae_lib.utils.log_likelihood import calculate_likelihood
import numpy as np
def train(epoch, train_loader, model, opt, args, logger):
model.train()
train_loss = np.zeros(len(train_loader))
train_bpd = np.zeros(len(train_loader))
num_data = 0
# set warmup coefficient
beta = min([(epoch * 1.) / max([args.warmup, 1.]), args.max_beta])
logger.info('beta = {:5.4f}'.format(beta))
end = time.time()
for batch_idx, (data, _) in enumerate(train_loader):
if args.cuda:
data = data.cuda()
if args.dynamic_binarization:
data = torch.bernoulli(data)
data = data.view(-1, *args.input_size)
opt.zero_grad()
x_mean, z_mu, z_var, ldj, z0, zk = model(data)
loss, rec, kl, bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args, beta=beta)
loss.backward()
train_loss[batch_idx] = loss.item()
train_bpd[batch_idx] = bpd
opt.step()
if 'MMAF' in args.flow:
if args.Lipshitz > 0:
model.forceLipshitz(args.Lipshitz)
rec = rec.item()
kl = kl.item()
num_data += len(data)
batch_time = time.time() - end
end = time.time()
if batch_idx % args.log_interval == 0:
if args.input_type == 'binary':
perc = 100. * batch_idx / len(train_loader)
log_msg = (
'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | '
'Rec {:11.6f} | KL {:11.6f}'.format(
epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(), rec, kl
)
)
else:
perc = 100. * batch_idx / len(train_loader)
tmp = 'Epoch {:3d} [{:5d}/{:5d} ({:2.0f}%)] | Time {:.3f} | Loss {:11.6f} | Bits/dim {:8.6f}'
log_msg = tmp.format(epoch, num_data, len(train_loader.sampler), perc, batch_time, loss.item(),
bpd), '\trec: {:11.3f}\tkl: {:11.6f}'.format(rec, kl)
log_msg = "".join(log_msg)
            if 'cnf' in args.flow:
                # NOTE: f_nfe / b_nfe (ODE-solver function-evaluation counts) are
                # not computed in this file; they must be tracked on the CNF model
                # before this branch can run without raising a NameError.
                log_msg += ' | NFE Forward {} | NFE Backward {}'.format(f_nfe, b_nfe)
logger.info(log_msg)
if args.input_type == 'binary':
logger.info('====> Epoch: {:3d} Average train loss: {:.4f}'.format(epoch, train_loss.sum() / len(train_loader)))
else:
logger.info(
'====> Epoch: {:3d} Average train loss: {:.4f}, average bpd: {:.4f}'.
format(epoch, train_loss.sum() / len(train_loader), train_bpd.sum() / len(train_loader))
)
return train_loss
def evaluate(data_loader, model, args, logger, testing=False, epoch=0):
model.eval()
if 'MMAF' in args.flow:
prev_steps = model.flow.steps
model.flow.steps = 100
model.flow.model.set_steps_nb(100)
loss = 0.
batch_idx = 0
bpd = 0.
if args.input_type == 'binary':
loss_type = 'elbo'
else:
loss_type = 'bpd'
    if testing and 'cnf' in args.flow:
        # override_divergence_fn is expected to come from the FFJORD train_misc
        # utilities; it is not imported at the top of this file as-is.
        override_divergence_fn(model, "brute_force")
for data, _ in data_loader:
batch_idx += 1
if args.cuda:
data = data.cuda()
with torch.no_grad():
data = data.view(-1, *args.input_size)
x_mean, z_mu, z_var, ldj, z0, zk = model(data)
batch_loss, rec, kl, batch_bpd = calculate_loss(x_mean, data, z_mu, z_var, z0, zk, ldj, args)
bpd += batch_bpd
loss += batch_loss.item()
# PRINT RECONSTRUCTIONS
if batch_idx == 1 and testing is False:
plot_reconstructions(data, x_mean, batch_loss, loss_type, epoch, args)
loss /= len(data_loader)
bpd /= len(data_loader)
if testing:
logger.info('====> Test set loss: {:.4f}'.format(loss))
# Compute log-likelihood
if testing and not ("cnf" in args.flow): # don't compute log-likelihood for cnf models
with torch.no_grad():
test_data = data_loader.dataset.tensors[0]
if args.cuda:
test_data = test_data.cuda()
logger.info('Computing log-likelihood on test set')
model.eval()
if args.dataset == 'caltech':
log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, S=2000, MB=500)
else:
log_likelihood, nll_bpd = calculate_likelihood(test_data, model, args, logger, S=5000, MB=500)
else:
log_likelihood = None
nll_bpd = None
if args.input_type in ['multinomial']:
bpd = loss / (np.prod(args.input_size) * np.log(2.))
if testing and not ("cnf" in args.flow):
logger.info('====> Test set log-likelihood: {:.4f}'.format(log_likelihood))
if args.input_type != 'binary':
logger.info('====> Test set bpd (elbo): {:.4f}'.format(bpd))
logger.info(
'====> Test set bpd (log-likelihood): {:.4f}'.
format(log_likelihood / (np.prod(args.input_size) * np.log(2.)))
)
if 'MMAF' in args.flow:
model.flow.steps = prev_steps
if not testing:
return loss, bpd
else:
return log_likelihood, nll_bpd
| 5,533 | 30.443182 | 120 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/optimization/__init__.py
| 0 | 0 | 0 |
py
|
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/distributions.py
|
from __future__ import print_function
import torch
import torch.utils.data
import math
MIN_EPSILON = 1e-5
MAX_EPSILON = 1. - 1e-5
PI = torch.FloatTensor([math.pi])
if torch.cuda.is_available():
PI = PI.cuda()
# N(x | mu, var) = 1/sqrt{2pi var} exp[-1/(2 var) (x-mean)(x-mean)]
# log N(x| mu, var) = -log sqrt(2pi) -0.5 log var - 0.5 (x-mean)(x-mean)/var
def log_normal_diag(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -0.5 * (log_var + (x - mean) * (x - mean) * log_var.exp().reciprocal())
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_normalized(x, mean, log_var, average=False, reduce=True, dim=None):
log_norm = -(x - mean) * (x - mean)
log_norm *= torch.reciprocal(2. * log_var.exp())
log_norm += -0.5 * log_var
log_norm += -0.5 * torch.log(2. * PI)
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_normal_standard(x, average=False, reduce=True, dim=None):
log_norm = -0.5 * x * x
if reduce:
if average:
return torch.mean(log_norm, dim)
else:
return torch.sum(log_norm, dim)
else:
return log_norm
def log_bernoulli(x, mean, average=False, reduce=True, dim=None):
probs = torch.clamp(mean, min=MIN_EPSILON, max=MAX_EPSILON)
log_bern = x * torch.log(probs) + (1. - x) * torch.log(1. - probs)
if reduce:
if average:
return torch.mean(log_bern, dim)
else:
return torch.sum(log_bern, dim)
else:
return log_bern
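# Editor's note: these helpers drop additive constants that cancel in
# ELBO-style differences such as log q(z_0) - log p(z_k) in the losses; e.g.
# log_normal_standard omits the -0.5 * log(2 * pi) term per dimension, so
#
#     log_normal_standard(torch.zeros(1, 3), dim=1)    # tensor([0.])
#
# while the true N(0, I) log-density at zero in 3-D is -1.5 * log(2 * pi).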
| 1,768 | 25.80303 | 86 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/plotting.py
|
from __future__ import division
from __future__ import print_function
import numpy as np
import matplotlib
# noninteractive background
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def plot_training_curve(train_loss, validation_loss, fname='training_curve.pdf', labels=None):
"""
Plots train_loss and validation loss as a function of optimization iteration
:param train_loss: np.array of train_loss (1D or 2D)
:param validation_loss: np.array of validation loss (1D or 2D)
:param fname: output file name
:param labels: if train_loss and validation loss are 2D, then labels indicate which variable is varied
        across training curves.
:return: None
"""
plt.close()
matplotlib.rcParams.update({'font.size': 14})
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
if len(train_loss.shape) == 1:
# Single training curve
fig, ax = plt.subplots(nrows=1, ncols=1)
figsize = (6, 4)
if train_loss.shape[0] == validation_loss.shape[0]:
# validation score evaluated every iteration
x = np.arange(train_loss.shape[0])
ax.plot(x, train_loss, '-', lw=2., color='black', label='train')
ax.plot(x, validation_loss, '-', lw=2., color='blue', label='val')
elif train_loss.shape[0] % validation_loss.shape[0] == 0:
# validation score evaluated every epoch
x = np.arange(train_loss.shape[0])
ax.plot(x, train_loss, '-', lw=2., color='black', label='train')
x = np.arange(validation_loss.shape[0])
x = (x + 1) * train_loss.shape[0] / validation_loss.shape[0]
ax.plot(x, validation_loss, '-', lw=2., color='blue', label='val')
else:
raise ValueError('Length of train_loss and validation_loss must be equal or divisible')
miny = np.minimum(validation_loss.min(), train_loss.min()) - 20.
maxy = np.maximum(validation_loss.max(), train_loss.max()) + 30.
ax.set_ylim([miny, maxy])
elif len(train_loss.shape) == 2:
# Multiple training curves
cmap = plt.cm.brg
cNorm = matplotlib.colors.Normalize(vmin=0, vmax=train_loss.shape[0])
scalarMap = matplotlib.cm.ScalarMappable(norm=cNorm, cmap=cmap)
fig, ax = plt.subplots(nrows=1, ncols=1)
figsize = (6, 4)
if labels is None:
labels = ['%d' % i for i in range(train_loss.shape[0])]
if train_loss.shape[1] == validation_loss.shape[1]:
for i in range(train_loss.shape[0]):
color_val = scalarMap.to_rgba(i)
# validation score evaluated every iteration
x = np.arange(train_loss.shape[0])
ax.plot(x, train_loss[i], '-', lw=2., color=color_val, label=labels[i])
ax.plot(x, validation_loss[i], '--', lw=2., color=color_val)
elif train_loss.shape[1] % validation_loss.shape[1] == 0:
for i in range(train_loss.shape[0]):
color_val = scalarMap.to_rgba(i)
# validation score evaluated every epoch
x = np.arange(train_loss.shape[1])
ax.plot(x, train_loss[i], '-', lw=2., color=color_val, label=labels[i])
x = np.arange(validation_loss.shape[1])
x = (x + 1) * train_loss.shape[1] / validation_loss.shape[1]
ax.plot(x, validation_loss[i], '-', lw=2., color=color_val)
miny = np.minimum(validation_loss.min(), train_loss.min()) - 20.
maxy = np.maximum(validation_loss.max(), train_loss.max()) + 30.
ax.set_ylim([miny, maxy])
else:
raise ValueError('train_loss and validation_loss must be 1D or 2D arrays')
ax.set_xlabel('iteration')
ax.set_ylabel('loss')
plt.title('Training and validation loss')
fig.set_size_inches(figsize)
fig.subplots_adjust(hspace=0.1)
plt.savefig(fname, bbox_inches='tight')
plt.close()
| 4,021 | 37.304762 | 106 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/log_likelihood.py
|
from __future__ import print_function
import time
import numpy as np
from scipy.special import logsumexp
from ...vae_lib.optimization.loss import calculate_loss_array
def calculate_likelihood(X, model, args, logger, S=5000, MB=500):
# set auxiliary variables for number of training and test sets
N_test = X.size(0)
X = X.view(-1, *args.input_size)
likelihood_test = []
if S <= MB:
R = 1
else:
R = S // MB
S = MB
end = time.time()
for j in range(N_test):
x_single = X[j].unsqueeze(0)
a = []
for r in range(0, R):
            # Repeat the single test point S times for the importance-sampling estimate
x = x_single.expand(S, *x_single.size()[1:]).contiguous()
x_mean, z_mu, z_var, ldj, z0, zk = model(x)
a_tmp = calculate_loss_array(x_mean, x, z_mu, z_var, z0, zk, ldj, args)
a.append(-a_tmp.cpu().data.numpy())
# calculate max
a = np.asarray(a)
a = np.reshape(a, (a.shape[0] * a.shape[1], 1))
likelihood_x = logsumexp(a)
likelihood_test.append(likelihood_x - np.log(len(a)))
if j % 1 == 0:
logger.info('Progress: {:.2f}% | Time: {:.4f}'.format(j / (1. * N_test) * 100, time.time() - end))
end = time.time()
likelihood_test = np.array(likelihood_test)
nll = -np.mean(likelihood_test)
if args.input_type == 'multinomial':
bpd = nll / (np.prod(args.input_size) * np.log(2.))
elif args.input_type == 'binary':
bpd = 0.
else:
raise ValueError('invalid input type!')
return nll, bpd
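# Editor's note: the loop above is the standard importance-sampling estimate of
# the marginal likelihood,
#
#     log p(x) ~= logsumexp_s( log w_s ) - log S,   w_s = p(x, z_s) / q(z_s | x),
#
# computed with S samples per test point, split into R minibatches of size MB.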
| 1,595 | 25.163934 | 110 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/load_data.py
|
from __future__ import print_function
import torch
import torch.utils.data as data_utils
import pickle
from scipy.io import loadmat
import numpy as np
import os
def load_static_mnist(args, **kwargs):
"""
Dataloading function for static mnist. Outputs image data in vectorized form: each image is a vector of size 784
"""
args.dynamic_binarization = False
args.input_type = 'binary'
args.input_size = [1, 28, 28]
# start processing
def lines_to_np_array(lines):
return np.array([[int(i) for i in line.split()] for line in lines])
with open(os.path.join('datasets', 'data', 'binarized_mnist_train.amat')) as f:
lines = f.readlines()
x_train = lines_to_np_array(lines).astype('float32')
with open(os.path.join('datasets', 'data', 'binarized_mnist_valid.amat')) as f:
lines = f.readlines()
x_val = lines_to_np_array(lines).astype('float32')
with open(os.path.join('datasets', 'data', 'binarized_mnist_test.amat')) as f:
lines = f.readlines()
x_test = lines_to_np_array(lines).astype('float32')
# shuffle train data
np.random.shuffle(x_train)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_freyfaces(args, **kwargs):
# set args
args.input_size = [1, 28, 20]
args.input_type = 'multinomial'
args.dynamic_binarization = False
TRAIN = 1565
VAL = 200
TEST = 200
# start processing
with open('data/Freyfaces/freyfaces.pkl', 'rb') as f:
data = pickle.load(f, encoding="latin1")[0]
data = data / 255.
# NOTE: shuffling is done before splitting into train and test set, so test set is different for every run!
# shuffle data:
np.random.seed(args.freyseed)
np.random.shuffle(data)
# train images
x_train = data[0:TRAIN].reshape(-1, 28 * 20)
# validation images
x_val = data[TRAIN:(TRAIN + VAL)].reshape(-1, 28 * 20)
# test images
x_test = data[(TRAIN + VAL):(TRAIN + VAL + TEST)].reshape(-1, 28 * 20)
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_omniglot(args, **kwargs):
n_validation = 1345
# set args
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = True
# start processing
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F')
omni_raw = loadmat(os.path.join('data', 'OMNIGLOT', 'chardata.mat'))
# train and test data
train_data = reshape_data(omni_raw['data'].T.astype('float32'))
x_test = reshape_data(omni_raw['testdata'].T.astype('float32'))
# shuffle train data
np.random.shuffle(train_data)
# set train and validation data
x_train = train_data[:-n_validation]
x_val = train_data[-n_validation:]
# binarize
if args.dynamic_binarization:
args.input_type = 'binary'
np.random.seed(777)
x_val = np.random.binomial(1, x_val)
x_test = np.random.binomial(1, x_test)
else:
args.input_type = 'gray'
# idle y's
y_train = np.zeros((x_train.shape[0], 1))
y_val = np.zeros((x_val.shape[0], 1))
y_test = np.zeros((x_test.shape[0], 1))
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_caltech101silhouettes(args, **kwargs):
# set args
args.input_size = [1, 28, 28]
args.input_type = 'binary'
args.dynamic_binarization = False
# start processing
def reshape_data(data):
return data.reshape((-1, 28, 28)).reshape((-1, 28 * 28), order='F')
caltech_raw = loadmat(os.path.join('data', 'Caltech101Silhouettes', 'caltech101_silhouettes_28_split1.mat'))
# train, validation and test data
x_train = 1. - reshape_data(caltech_raw['train_data'].astype('float32'))
np.random.shuffle(x_train)
x_val = 1. - reshape_data(caltech_raw['val_data'].astype('float32'))
np.random.shuffle(x_val)
x_test = 1. - reshape_data(caltech_raw['test_data'].astype('float32'))
y_train = caltech_raw['train_labels']
y_val = caltech_raw['val_labels']
y_test = caltech_raw['test_labels']
# pytorch data loader
train = data_utils.TensorDataset(torch.from_numpy(x_train), torch.from_numpy(y_train))
train_loader = data_utils.DataLoader(train, batch_size=args.batch_size, shuffle=True, **kwargs)
validation = data_utils.TensorDataset(torch.from_numpy(x_val).float(), torch.from_numpy(y_val))
val_loader = data_utils.DataLoader(validation, batch_size=args.batch_size, shuffle=False, **kwargs)
test = data_utils.TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(y_test))
test_loader = data_utils.DataLoader(test, batch_size=args.batch_size, shuffle=False, **kwargs)
return train_loader, val_loader, test_loader, args
def load_dataset(args, **kwargs):
if args.dataset == 'mnist':
train_loader, val_loader, test_loader, args = load_static_mnist(args, **kwargs)
elif args.dataset == 'caltech':
train_loader, val_loader, test_loader, args = load_caltech101silhouettes(args, **kwargs)
elif args.dataset == 'freyfaces':
train_loader, val_loader, test_loader, args = load_freyfaces(args, **kwargs)
elif args.dataset == 'omniglot':
train_loader, val_loader, test_loader, args = load_omniglot(args, **kwargs)
else:
raise Exception('Wrong name of the dataset!')
return train_loader, val_loader, test_loader, args
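# --- hedged usage sketch (not part of the original module): the loaders only
# read `dataset` and `batch_size` from args, so a bare namespace is enough;
# assumes the corresponding data files (e.g. data/OMNIGLOT/chardata.mat) exist.
def _demo_load_dataset():
    from argparse import Namespace
    args = Namespace(dataset='omniglot', batch_size=100)
    train_loader, val_loader, test_loader, args = load_dataset(args)
    print(args.input_size, len(train_loader.dataset))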
| 7,580 | 35.800971 | 116 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/vae_lib/utils/visual_evaluation.py
|
from __future__ import print_function
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
def plot_reconstructions(data, recon_mean, loss, loss_type, epoch, args):
if args.input_type == 'multinomial':
# data is already between 0 and 1
num_classes = 256
# Find largest class logit
tmp = recon_mean.view(-1, num_classes, *args.input_size).max(dim=1)[1]
recon_mean = tmp.float() / (num_classes - 1.)
    # make sure the output directory exists (also when resuming past epoch 1)
    if not os.path.exists(args.snap_dir + 'reconstruction/'):
        os.makedirs(args.snap_dir + 'reconstruction/')
    if epoch == 1:
# VISUALIZATION: plot real images
plot_images(args, data.data.cpu().numpy()[0:9], args.snap_dir + 'reconstruction/', 'real', size_x=3, size_y=3)
# VISUALIZATION: plot reconstructions
    if loss_type == 'bpd':
        fname = str(epoch) + '_bpd_%5.3f' % loss
    elif loss_type == 'elbo':
        fname = str(epoch) + '_elbo_%6.4f' % loss
    else:
        # avoid a NameError further down on an unexpected loss type
        raise ValueError('Unknown loss_type: %s' % loss_type)
plot_images(args, recon_mean.data.cpu().numpy()[0:9], args.snap_dir + 'reconstruction/', fname, size_x=3, size_y=3)
def plot_images(args, x_sample, dir, file_name, size_x=3, size_y=3):
fig = plt.figure(figsize=(size_x, size_y))
# fig = plt.figure(1)
gs = gridspec.GridSpec(size_x, size_y)
gs.update(wspace=0.05, hspace=0.05)
for i, sample in enumerate(x_sample):
ax = plt.subplot(gs[i])
plt.axis('off')
ax.set_xticklabels([])
ax.set_yticklabels([])
ax.set_aspect('equal')
sample = sample.reshape((args.input_size[0], args.input_size[1], args.input_size[2]))
sample = sample.swapaxes(0, 2)
sample = sample.swapaxes(0, 1)
if (args.input_type == 'binary') or (args.input_type in ['multinomial'] and args.input_size[0] == 1):
sample = sample[:, :, 0]
plt.imshow(sample, cmap='gray', vmin=0, vmax=1)
else:
plt.imshow(sample)
plt.savefig(dir + file_name + '.png', bbox_inches='tight')
plt.close(fig)
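# --- hedged usage sketch (not part of the original module): plot_images only
# needs args.input_size and args.input_type, so a bare namespace suffices; the
# output directory and file name below are arbitrary.
def _demo_plot_images(out_dir='/tmp/'):
    import numpy as np
    from argparse import Namespace
    args = Namespace(input_size=[1, 28, 28], input_type='binary')
    x = np.random.rand(9, 28 * 28)  # nine fake grayscale "images"
    plot_images(args, x, out_dir, 'demo', size_x=3, size_y=3)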
| 2,063 | 37.222222 | 119 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/spectral_normalization.py
|
# Code from https://github.com/christiancosgrove/pytorch-spectral-normalization-gan/blob/master/spectral_normalization.py
import torch
from torch import nn
from torch.nn import Parameter
def l2normalize(v, eps=1e-12):
return v / (v.norm() + eps)
def joint_gaussian(n_samp=1000):
    # Distribution.sample_n is deprecated; sample((n,)) is the supported API
    x2 = torch.distributions.Normal(0., 4.).sample((n_samp,))
    x1 = torch.distributions.Normal(0., 1.).sample((n_samp,)) + x2**2/4
    return torch.cat((x1.unsqueeze(1), x2.unsqueeze(1)), 1)
class SpectralNorm(nn.Module):
def __init__(self, module, name='weight', power_iterations=1, factor=.8):
super(SpectralNorm, self).__init__()
self.module = module
self.name = name
self.power_iterations = power_iterations
self.factor = factor
if not self._made_params():
self._make_params()
#self._update_u_v()
def _update_u_v(self):
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
height = w.data.shape[0]
for _ in range(self.power_iterations):
v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
# sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
sigma = u.dot(w.view(height, -1).mv(v))
setattr(self.module, self.name, self.factor*w / sigma.expand_as(w))
def _made_params(self):
try:
u = getattr(self.module, self.name + "_u")
v = getattr(self.module, self.name + "_v")
w = getattr(self.module, self.name + "_bar")
return True
except AttributeError:
return False
def _make_params(self):
w = getattr(self.module, self.name)
height = w.data.shape[0]
width = w.view(height, -1).data.shape[1]
u = Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
v = Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
u.data = l2normalize(u.data)
v.data = l2normalize(v.data)
w_bar = Parameter(w.data)
del self.module._parameters[self.name]
self.module.register_parameter(self.name + "_u", u)
self.module.register_parameter(self.name + "_v", v)
self.module.register_parameter(self.name + "_bar", w_bar)
def forward(self, *args):
#self._update_u_v()
return self.module.forward(*args)
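# --- hedged usage sketch (not part of the original file): wrap a linear layer
# and trigger one power-iteration update by hand, since this variant does not
# call _update_u_v() inside forward().
def _demo_spectral_norm():
    layer = SpectralNorm(nn.Linear(16, 8), power_iterations=1, factor=.8)
    layer._update_u_v()  # estimate sigma and set the normalized weight
    y = layer(torch.randn(4, 16))
    return y.shape  # torch.Size([4, 8])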
| 2,536 | 32.381579 | 121 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/made.py
|
"""
Implements Masked AutoEncoder for Density Estimation, by Germain et al. 2015
Re-implementation by Andrej Karpathy based on https://arxiv.org/abs/1502.03509
Modified by Antoine Wehenkel
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
# ------------------------------------------------------------------------------
class MaskedLinear(nn.Linear):
""" same as Linear except has a configurable mask on the weights """
def __init__(self, in_features, out_features, bias=True):
super().__init__(in_features, out_features, bias)
self.register_buffer('mask', torch.ones(out_features, in_features))
def set_mask(self, mask):
self.mask.data.copy_(torch.from_numpy(mask.astype(np.uint8).T))
def forward(self, input):
return F.linear(input, self.mask * self.weight, self.bias)
class MADE(nn.Module):
def __init__(self, nin, hidden_sizes, nout, num_masks=1, natural_ordering=False, random=False, device="cpu"):
"""
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
will be all the means and the second nin will be stds. i.e. output dimensions depend on the
same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
the output of running the tests for this file makes this a bit more clear with examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use random permutations
"""
super().__init__()
self.random = random
self.nin = nin
self.nout = nout
self.device = device
self.pi = torch.tensor(math.pi).to(self.device)
self.hidden_sizes = hidden_sizes
assert self.nout % self.nin == 0, "nout must be integer multiple of nin"
# define a simple MLP neural net
self.net = []
hs = [nin] + hidden_sizes + [nout]
for h0,h1 in zip(hs, hs[1:]):
self.net.extend([
MaskedLinear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net = nn.Sequential(*self.net).to(device)
# seeds for orders/connectivities of the model ensemble
self.natural_ordering = natural_ordering
self.num_masks = num_masks
self.seed = 0 # for cycling through num_masks orderings
self.m = {}
self.update_masks() # builds the initial self.m connectivity
# note, we could also precompute the masks and cache them, but this
# could get memory expensive for large number of masks.
def update_masks(self):
if self.m and self.num_masks == 1: return # only a single seed, skip for efficiency
L = len(self.hidden_sizes)
# fetch the next seed and construct a random stream
rng = np.random.RandomState(self.seed)
self.seed = (self.seed + 1) % self.num_masks
# sample the order of the inputs and the connectivity of all neurons
if self.random:
self.m[-1] = np.arange(self.nin) if self.natural_ordering else rng.permutation(self.nin)
for l in range(L):
self.m[l] = rng.randint(self.m[l-1].min(), self.nin-1, size=self.hidden_sizes[l])
else:
self.m[-1] = np.arange(self.nin)
for l in range(L):
self.m[l] = np.array([self.nin - 1 - (i % self.nin) for i in range(self.hidden_sizes[l])])
# construct the mask matrices
masks = [self.m[l-1][:,None] <= self.m[l][None,:] for l in range(L)]
masks.append(self.m[L-1][:,None] < self.m[-1][None,:])
# handle the case where nout = nin * k, for integer k > 1
if self.nout > self.nin:
k = int(self.nout / self.nin)
# replicate the mask across the other outputs
masks[-1] = np.concatenate([masks[-1]]*k, axis=1)
# set the masks in all MaskedLinear layers
layers = [l for l in self.net.modules() if isinstance(l, MaskedLinear)]
for l,m in zip(layers, masks):
l.set_mask(m)
# map between in_d and order
self.i_map = self.m[-1].copy()
for k in range(len(self.m[-1])):
self.i_map[self.m[-1][k]] = k
def forward(self, x, context=None):
if self.nout == 2:
transf = self.net(x)
mu, sigma = transf[:, :self.nin], transf[:, self.nin:]
z = (x - mu) * torch.exp(-sigma)
return z
return self.net(x)
def compute_ll(self, x):
# Jac and x of MADE
transf = self.net(x)
mu, sigma = transf[:, :self.nin], transf[:, self.nin:]
z = (x - mu) * torch.exp(-sigma)
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = - sigma.sum(1) + log_prob_gauss
return ll, z
def invert(self, z):
if self.nin != self.nout/2:
return None
# We suppose a Gaussian MADE
u = torch.zeros(z.shape)
for d in range(self.nin):
transf = self.forward(u)
mu, sigma = transf[:, self.i_map[d]], transf[:, self.nin + self.i_map[d]]
u[:, self.i_map[d]] = z[:, self.i_map[d]] * torch.exp(sigma) + mu
return u
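# --- hedged usage sketch (not part of the original file): with nout = 2 * nin
# a MADE parameterizes per-dimension (mu, log-sigma) chunks, so compute_ll and
# invert act as a Gaussian autoregressive flow.
def _demo_gaussian_made():
    made = MADE(nin=3, hidden_sizes=[32], nout=6, natural_ordering=True)
    x = torch.randn(4, 3)
    ll, z = made.compute_ll(x)
    u = made.invert(z)  # sequential inverse, one dimension at a time
    return ll.shape, u.shape  # torch.Size([4]), torch.Size([4, 3])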
# ------------------------------------------------------------------------------
class ConditionnalMADE(MADE):
def __init__(self, nin, cond_in, hidden_sizes, nout, num_masks=1, natural_ordering=False, random=False, device="cpu"):
"""
nin: integer; number of inputs
hidden sizes: a list of integers; number of units in hidden layers
nout: integer; number of outputs, which usually collectively parameterize some kind of 1D distribution
note: if nout is e.g. 2x larger than nin (perhaps the mean and std), then the first nin
will be all the means and the second nin will be stds. i.e. output dimensions depend on the
same input dimensions in "chunks" and should be carefully decoded downstream appropriately.
the output of running the tests for this file makes this a bit more clear with examples.
num_masks: can be used to train ensemble over orderings/connections
natural_ordering: force natural ordering of dimensions, don't use random permutations
"""
super().__init__(nin + cond_in, hidden_sizes, nout, num_masks, natural_ordering, random, device)
self.nin_non_cond = nin
self.cond_in = cond_in
def forward(self, x, context):
out = super().forward(torch.cat((context, x), 1))
out = out.contiguous().view(x.shape[0], int(out.shape[1]/self.nin), self.nin)[:, :, self.cond_in:].contiguous().view(x.shape[0], -1)
return out
def computeLL(self, x, context):
# Jac and x of MADE
transf = self.net(torch.cat((context, x), 1))
transf = transf.contiguous().view(x.shape[0], int(transf.shape[1] / self.nin), self.nin)[:, :, self.cond_in:].contiguous().view(x.shape[0], -1)
mu, sigma = transf[:, :self.nin], transf[:, self.nin:]
z = (x - mu) * torch.exp(-sigma)
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = - sigma.sum(1) + log_prob_gauss
return ll, z
    def invert(self, z, context):
        if self.nin != self.nout / 2:
            return None
        # We suppose a Gaussian MADE
        u = torch.zeros(z.shape)
        for d in range(self.nin):
            # recompute the transform from the partially inverted sample u
            # (the original referenced an undefined variable x here)
            transf = self.net(torch.cat((context, u), 1))
            mu, sigma = transf[:, self.i_map[d]], transf[:, self.nin + self.i_map[d]]
            u[:, self.i_map[d]] = z[:, self.i_map[d]] * torch.exp(sigma) + mu
        return u
if __name__ == '__main__':
from torch.autograd import Variable
# run a quick and dirty test for the autoregressive property
D = 10
rng = np.random.RandomState(14)
x = (rng.rand(1, D) > 0.5).astype(np.float32)
configs = [
(D, [], D, False), # test various hidden sizes
(D, [200], D, False),
(D, [200, 220], D, False),
(D, [200, 220, 230], D, False),
(D, [200, 220], D, True), # natural ordering test
(D, [200, 220], 2*D, True), # test nout > nin
(D, [200, 220], 3*D, False), # test nout > nin
]
for nin, hiddens, nout, natural_ordering in configs:
print("checking nin %d, hiddens %s, nout %d, natural %s" %
(nin, hiddens, nout, natural_ordering))
model = MADE(nin, hiddens, nout, natural_ordering=natural_ordering)
z = torch.randn(1, nin)
model.invert(z)
continue
# run backpropagation for each dimension to compute what other
# dimensions it depends on.
res = []
for k in range(nout):
xtr = Variable(torch.from_numpy(x), requires_grad=True)
xtrhat = model(xtr)
loss = xtrhat[0,k]
loss.backward()
depends = (xtr.grad[0].numpy() != 0).astype(np.uint8)
depends_ix = list(np.where(depends)[0])
isok = k % nin not in depends_ix
res.append((len(depends_ix), k, depends_ix, isok))
# pretty print the dependencies
res.sort()
for nl, k, ix, isok in res:
print("output %2d depends on inputs: %30s : %s" % (k, ix, "OK" if isok else "NOTOK"))
| 9,945 | 40.26971 | 151 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/ParallelNeuralIntegral.py
|
import torch
import numpy as np
import math
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def compute_cc_weights(nb_steps):
lam = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
lam = np.cos((lam @ lam.T) * math.pi / nb_steps)
lam[:, 0] = .5
lam[:, -1] = .5 * lam[:, -1]
lam = lam * 2 / nb_steps
W = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
W[np.arange(1, nb_steps + 1, 2)] = 0
W = 2 / (1 - W ** 2)
W[0] = 1
W[np.arange(1, nb_steps + 1, 2)] = 0
cc_weights = torch.tensor(lam.T @ W).float()
steps = torch.tensor(np.cos(np.arange(0, nb_steps + 1, 1).reshape(-1, 1) * math.pi / nb_steps)).float()
return cc_weights, steps
# cache the CC weights per nb_steps; note they live on the device of the first call
tensor_cache = {}
def integrate(x0, nb_steps, step_sizes, integrand, h, compute_grad=False, x_tot=None):
#Clenshaw-Curtis Quadrature Method
if tensor_cache.get(nb_steps) is None:
cc_weights, steps = compute_cc_weights(nb_steps)
device = x0.get_device() if x0.is_cuda else "cpu"
cc_weights, steps = cc_weights.to(device), steps.to(device)
tensor_cache[nb_steps] = (cc_weights, steps)
cc_weights, steps = tensor_cache[nb_steps]
xT = x0 + nb_steps*step_sizes
if not compute_grad:
x0_t = x0.unsqueeze(1).expand(-1, nb_steps + 1, -1)
xT_t = xT.unsqueeze(1).expand(-1, nb_steps + 1, -1)
h_steps = h.unsqueeze(1).expand(-1, nb_steps + 1, -1)
steps_t = steps.unsqueeze(0).expand(x0_t.shape[0], -1, x0_t.shape[2])
X_steps = x0_t + (xT_t-x0_t)*(steps_t + 1)/2
X_steps = X_steps.contiguous().view(-1, x0_t.shape[2])
h_steps = h_steps.contiguous().view(-1, h.shape[1])
dzs = integrand(X_steps, h_steps)
dzs = dzs.view(xT_t.shape[0], nb_steps+1, -1)
dzs = dzs*cc_weights.unsqueeze(0).expand(dzs.shape)
z_est = dzs.sum(1)
return z_est*(xT - x0)/2
else:
x0_t = x0.unsqueeze(1).expand(-1, nb_steps + 1, -1)
xT_t = xT.unsqueeze(1).expand(-1, nb_steps + 1, -1)
x_tot = x_tot * (xT - x0) / 2
x_tot_steps = x_tot.unsqueeze(1).expand(-1, nb_steps + 1, -1) * cc_weights.unsqueeze(0).expand(x_tot.shape[0], -1, x_tot.shape[1])
h_steps = h.unsqueeze(1).expand(-1, nb_steps + 1, -1)
steps_t = steps.unsqueeze(0).expand(x0_t.shape[0], -1, x0_t.shape[2])
X_steps = x0_t + (xT_t - x0_t) * (steps_t + 1) / 2
X_steps = X_steps.contiguous().view(-1, x0_t.shape[2])
h_steps = h_steps.contiguous().view(-1, h.shape[1])
x_tot_steps = x_tot_steps.contiguous().view(-1, x_tot.shape[1])
g_param, g_h = computeIntegrand(X_steps, h_steps, integrand, x_tot_steps, nb_steps+1)
return g_param, g_h
def computeIntegrand(x, h, integrand, x_tot, nb_steps):
h.requires_grad_(True)
with torch.enable_grad():
f = integrand.forward(x, h)
g_param = _flatten(torch.autograd.grad(f, integrand.parameters(), x_tot, create_graph=True, retain_graph=True))
g_h = _flatten(torch.autograd.grad(f, h, x_tot))
return g_param, g_h.view(int(x.shape[0]/nb_steps), nb_steps, -1).sum(1)
class ParallelNeuralIntegral(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x, integrand, flat_params, h, nb_steps=20):
with torch.no_grad():
x_tot = integrate(x0, nb_steps, (x - x0)/nb_steps, integrand, h, False)
# Save for backward
ctx.integrand = integrand
ctx.nb_steps = nb_steps
ctx.save_for_backward(x0.clone(), x.clone(), h)
return x_tot
@staticmethod
def backward(ctx, grad_output):
x0, x, h = ctx.saved_tensors
integrand = ctx.integrand
nb_steps = ctx.nb_steps
integrand_grad, h_grad = integrate(x0, nb_steps, x/nb_steps, integrand, h, True, grad_output)
x_grad = integrand(x, h)
x0_grad = integrand(x0, h)
# Leibniz formula
return -x0_grad*grad_output, x_grad*grad_output, None, integrand_grad, h_grad.view(h.shape), None
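# --- hedged usage sketch (not part of the original file): sanity-check the
# Clenshaw-Curtis quadrature on a closed-form integral; the non-gradient path
# of integrate() only needs a plain callable integrand(x, h).
if __name__ == '__main__':
    x0 = torch.zeros(1, 1)
    x = torch.full((1, 1), math.pi)
    h = torch.zeros(1, 1)
    nb_steps = 20
    z = integrate(x0, nb_steps, (x - x0) / nb_steps, lambda xs, hs: torch.sin(xs), h)
    print(z.item())  # integral of sin over [0, pi]; expect ~2.0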
| 4,099 | 38.423077 | 138 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/MonotonicNN.py
|
import torch
import torch.nn as nn
from .NeuralIntegral import NeuralIntegral
from .ParallelNeuralIntegral import ParallelNeuralIntegral
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
class IntegrandNN(nn.Module):
def __init__(self, in_d, hidden_layers):
super(IntegrandNN, self).__init__()
self.net = []
hs = [in_d] + hidden_layers + [1]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net.append(nn.ELU())
self.net = nn.Sequential(*self.net)
def forward(self, x, h):
return self.net(torch.cat((x, h), 1)) + 1.
class MonotonicNN(nn.Module):
def __init__(self, in_d, hidden_layers, nb_steps=50, dev="cpu"):
super(MonotonicNN, self).__init__()
self.integrand = IntegrandNN(in_d, hidden_layers)
self.net = []
hs = [in_d-1] + hidden_layers + [2]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.ReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
# It will output the scaling and offset factors.
self.net = nn.Sequential(*self.net)
self.device = dev
self.nb_steps = nb_steps
'''
    The forward procedure takes as input x, the variable over which the integration is performed, and h, the other conditioning variables.
'''
def forward(self, x, h):
x0 = torch.zeros(x.shape).to(self.device)
out = self.net(h)
offset = out[:, [0]]
scaling = torch.exp(out[:, [1]])
return scaling*ParallelNeuralIntegral.apply(x0, x, self.integrand, _flatten(self.integrand.parameters()), h, self.nb_steps) + offset
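# --- hedged usage sketch (not part of the original file): a 2-input monotonic
# network is monotone in x for every fixed conditioning variable h.
if __name__ == '__main__':
    model = MonotonicNN(2, [64, 64], nb_steps=50, dev="cpu")
    x = torch.linspace(-2., 2., 5).unsqueeze(1)
    h = torch.zeros(5, 1)
    y = model(x, h)
    print(y.flatten())  # values should be non-decreasing in x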
| 1,957 | 34.6 | 145 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/__init__.py
|
from .UMNNMAFFlow import UMNNMAFFlow
from .MonotonicNN import MonotonicNN, IntegrandNN
from .UMNNMAF import IntegrandNetwork, UMNNMAF
from .made import MADE
from .NeuralIntegral import NeuralIntegral
from .ParallelNeuralIntegral import ParallelNeuralIntegral
| 258 | 42.166667 | 58 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/NeuralIntegral.py
|
import torch
import numpy as np
import math
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def compute_cc_weights(nb_steps):
lam = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
lam = np.cos((lam @ lam.T) * math.pi / nb_steps)
lam[:, 0] = .5
lam[:, -1] = .5 * lam[:, -1]
lam = lam * 2 / nb_steps
W = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
W[np.arange(1, nb_steps + 1, 2)] = 0
W = 2 / (1 - W ** 2)
W[0] = 1
W[np.arange(1, nb_steps + 1, 2)] = 0
cc_weights = torch.tensor(lam.T @ W).float()
steps = torch.tensor(np.cos(np.arange(0, nb_steps + 1, 1).reshape(-1, 1) * math.pi / nb_steps)).float()
return cc_weights, steps
def integrate(x0, nb_steps, step_sizes, integrand, h, compute_grad=False, x_tot=None):
#Clenshaw-Curtis Quadrature Method
cc_weights, steps = compute_cc_weights(nb_steps)
device = x0.get_device() if x0.is_cuda else "cpu"
cc_weights, steps = cc_weights.to(device), steps.to(device)
if compute_grad:
g_param = 0.
g_h = 0.
else:
z = 0.
xT = x0 + nb_steps*step_sizes
for i in range(nb_steps + 1):
x = (x0 + (xT - x0)*(steps[i] + 1)/2)
if compute_grad:
dg_param, dg_h = computeIntegrand(x, h, integrand, x_tot*(xT - x0)/2)
g_param += cc_weights[i]*dg_param
g_h += cc_weights[i]*dg_h
else:
dz = integrand(x, h)
z = z + cc_weights[i]*dz
if compute_grad:
return g_param, g_h
return z*(xT - x0)/2
def computeIntegrand(x, h, integrand, x_tot):
with torch.enable_grad():
f = integrand.forward(x, h)
g_param = _flatten(torch.autograd.grad(f, integrand.parameters(), x_tot, create_graph=True, retain_graph=True))
g_h = _flatten(torch.autograd.grad(f, h, x_tot))
return g_param, g_h
class NeuralIntegral(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x, integrand, flat_params, h, nb_steps=20):
with torch.no_grad():
x_tot = integrate(x0, nb_steps, (x - x0)/nb_steps, integrand, h, False)
# Save for backward
ctx.integrand = integrand
ctx.nb_steps = nb_steps
ctx.save_for_backward(x0.clone(), x.clone(), h)
return x_tot
@staticmethod
def backward(ctx, grad_output):
x0, x, h = ctx.saved_tensors
integrand = ctx.integrand
nb_steps = ctx.nb_steps
integrand_grad, h_grad = integrate(x0, nb_steps, x/nb_steps, integrand, h, True, grad_output)
x_grad = integrand(x, h)
x0_grad = integrand(x0, h)
# Leibniz formula
return -x0_grad*grad_output, x_grad*grad_output, None, integrand_grad, h_grad.view(h.shape), None
| 2,840 | 31.284091 | 119 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/UMNNMAF.py
|
import torch
import torch.nn as nn
from .NeuralIntegral import NeuralIntegral
from .ParallelNeuralIntegral import ParallelNeuralIntegral
import numpy as np
import math
from .made import MADE, ConditionnalMADE
class ELUPlus(nn.Module):
def __init__(self):
super().__init__()
self.elu = nn.ELU()
def forward(self, x):
return self.elu(x) + 1.
dict_act_func = {"Sigmoid": nn.Sigmoid(), "ELU": ELUPlus()}
def _flatten(sequence):
flat = [p.contiguous().view(-1) for p in sequence]
return torch.cat(flat) if len(flat) > 0 else torch.tensor([])
def compute_lipschitz_linear(W, nb_iter=10):
x = torch.randn(W.shape[1], 1).to(W.device)
for i in range(nb_iter):
x_prev = x
x = W.transpose(0, 1) @ (W @ x_prev)
x = x/torch.norm(x)
lam = (torch.norm(W.transpose(0, 1) @ (W @ x))/torch.norm(x))**.5
return lam
class UMNNMAF(nn.Module):
def __init__(self, net, input_size, nb_steps=100, device="cpu", solver="CC"):
super().__init__()
self.net = net.to(device)
self.device = device
self.input_size = input_size
self.nb_steps = nb_steps
self.cc_weights = None
self.steps = None
self.solver = solver
self.register_buffer("pi", torch.tensor(math.pi))
# Scaling could be changed to be an autoregressive network output
self.scaling = nn.Parameter(torch.zeros(input_size, device=self.device), requires_grad=False)
def to(self, device):
self.device = device
super().to(device)
return self
def forward(self, x, method=None, x0=None, context=None):
x0 = x0.to(x.device) if x0 is not None else torch.zeros(x.shape).to(x.device)
xT = x
h = self.net.make_embeding(xT, context)
z0 = h.view(h.shape[0], -1, x.shape[1])[:, 0, :]
# s is a scaling factor.
s = torch.exp(self.scaling.unsqueeze(0).expand(x.shape[0], -1))
if self.solver == "CC":
z = NeuralIntegral.apply(x0, x, self.net.parallel_nets, _flatten(self.net.parallel_nets.parameters()),
h, self.nb_steps) + z0
elif self.solver == "CCParallel":
z = ParallelNeuralIntegral.apply(x0, x, self.net.parallel_nets, _flatten(self.net.parallel_nets.parameters()),
h, self.nb_steps) + z0
else:
return None
return s*z
def compute_cc_weights(self, nb_steps):
lam = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
lam = np.cos((lam @ lam.T)*math.pi/nb_steps)
lam[:, 0] = .5
lam[:, -1] = .5*lam[:, -1]
lam = lam*2/nb_steps
W = np.arange(0, nb_steps + 1, 1).reshape(-1, 1)
W[np.arange(1, nb_steps + 1, 2)] = 0
W = 2/(1 - W**2)
W[0] = 1
W[np.arange(1, nb_steps + 1, 2)] = 0
self.cc_weights = torch.tensor(lam.T @ W).float().to(self.device)
self.steps = torch.tensor(np.cos(np.arange(0, nb_steps+1, 1).reshape(-1, 1) * math.pi/nb_steps)).float().to(self.device)
def compute_log_jac(self, x, context=None):
self.net.make_embeding(x, context)
jac = self.net.forward(x)
return torch.log(jac + 1e-10) + self.scaling.unsqueeze(0).expand(x.shape[0], -1)
def compute_log_jac_bis(self, x, context=None):
z = self.forward(x, context=context)
jac = self.net.forward(x)
return z, torch.log(jac + 1e-10) + self.scaling.unsqueeze(0).expand(x.shape[0], -1)
def compute_ll(self, x, context=None):
z = self.forward(x, context=context)
jac = self.net.forward(x)
z.clamp_(-10., 10.)
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = log_prob_gauss + torch.log(jac + 1e-10).sum(1) + self.scaling.unsqueeze(0).expand(x.shape[0], -1).sum(1)
return ll, z
def compute_ll_bis(self, x, context=None):
z = self.forward(x, context=context)
jac = self.net.forward(x)
ll = torch.log(jac + 1e-10) + self.scaling.unsqueeze(0).expand(x.shape[0], -1)
z.clamp_(-10., 10.)
return ll, z
def compute_bpp(self, x, alpha=1e-6, context=None):
d = x.shape[1]
        ll, z = self.compute_ll(x, context=context)
bpp = -ll/(d*np.log(2)) - np.log2(1 - 2*alpha) + 8 \
+ 1/d * (torch.log2(torch.sigmoid(x)) + torch.log2(1 - torch.sigmoid(x))).sum(1)
z.clamp_(-10., 10.)
return bpp, ll, z
def set_steps_nb(self, nb_steps):
self.nb_steps = nb_steps
def compute_lipschitz(self, nb_iter=10):
        return self.net.parallel_nets.compute_lipschitz(nb_iter)
def force_lipschitz(self, L=1.5):
self.net.parallel_nets.force_lipschitz(L)
    # Bisection-style inversion on a grid: each iteration narrows the bracket around the preimage.
def invert(self, z, iter=10, context=None):
nb_step = 10
step = 1/(nb_step - 1)
x_range = (torch.ones(z.shape[0], nb_step) * torch.arange(0, 1 + step/2, step)).permute(1, 0).to(self.device)
z = z.unsqueeze(0).expand(nb_step, -1, -1)
x = z.clone()
x_inv = torch.zeros(z.shape[1], z.shape[2]).to(self.device)
left, right = -50*torch.ones(z.shape[1], z.shape[2]).to(self.device), torch.ones(z.shape[1], z.shape[2])\
.to(self.device)*50
s = torch.exp(self.scaling.unsqueeze(0).unsqueeze(1).expand(x.shape[0], x.shape[1], -1))
with torch.no_grad():
for j in range(self.input_size):
if j % 100 == 0:
print(j)
# Compute embedding and keep only the one related to x_j
h = self.net.make_embeding(x_inv, context)
offset = h.view(x_inv.shape[0], -1, x_inv.shape[1])[:, 0, [j]]
h_idx = torch.arange(j, h.shape[1], z.shape[2]).to(self.device)
h = h[:, h_idx]
h, offset = h.squeeze(1).unsqueeze(0).expand(nb_step, -1, -1), offset.unsqueeze(0).expand(nb_step, -1, -1)
x0 = torch.zeros(offset.shape).view(-1, 1).to(self.device)
derivative = lambda x, h: self.net.parallel_nets.independant_forward(torch.cat((x, h), 1))
for i in range(iter):
x[:, :, j] = x_range * (right[:, j] - left[:, j]) + left[:, j]
# if i == 0:
# print(right[:, j], left[:, j])
z_est = s[:, :, [j]]*(offset + ParallelNeuralIntegral.apply(x0, x[:, :, j].contiguous().view(-1, 1),
derivative, None,
h.contiguous().view(x0.shape[0], -1),
self.nb_steps).contiguous().view(nb_step, -1, 1))
_, z_pos = torch.abs(z_est[:, :, 0] - z[:, :, j]).min(0)
pos_midle = z_pos + torch.arange(0, z.shape[1]).to(self.device)*nb_step
z_val = z_est[:, :, 0].t().contiguous().view(-1)[pos_midle]
x_flat = x[:, :, j].t().contiguous().view(-1)
mask = (z_val < z[0, :, j]).float()
pos_left = pos_midle - 1
pos_right = (pos_midle + 1) % x_flat.shape[0]
left[:, j] = (mask * x_flat[pos_midle] + (1 - mask) * x_flat[pos_left])
right[:, j] = (mask * x_flat[pos_right] + (1 - mask) * x_flat[pos_midle])
x_inv[:, j] = x_flat[pos_midle]
return x_inv
class IntegrandNetwork(nn.Module):
def __init__(self, nnets, nin, hidden_sizes, nout, act_func='ELU', device="cpu"):
super().__init__()
self.nin = nin
self.nnets = nnets
self.nout = nout
self.hidden_sizes = hidden_sizes
self.device = device
# define a simple MLP neural net
self.net = []
hs = [nin] + hidden_sizes + [nout]
for h0, h1 in zip(hs, hs[1:]):
self.net.extend([
nn.Linear(h0, h1),
nn.LeakyReLU(),
])
self.net.pop() # pop the last ReLU for the output layer
self.net.append(dict_act_func[act_func])
self.net = nn.Sequential(*self.net)
self.masks = torch.eye(nnets).to(device)
def to(self, device):
self.device = device
self.net.to(device)
self.masks.to(device)
return self
def forward(self, x, h):
x = torch.cat((x, h), 1)
nb_batch, size_x = x.shape
x_he = x.view(nb_batch, -1, self.nnets).transpose(1, 2).contiguous().view(nb_batch*self.nnets, -1)
y = self.net(x_he).view(nb_batch, -1)
return y
def independant_forward(self, x):
return self.net(x)
def compute_lipschitz(self, nb_iter=10):
with torch.no_grad():
L = 1
for layer in self.net.modules():
if isinstance(layer, nn.Linear):
L *= compute_lipschitz_linear(layer.weight, nb_iter)
return L
def force_lipschitz(self, L=1.5):
with torch.no_grad():
for layer in self.net.modules():
if isinstance(layer, nn.Linear):
layer.weight /= max(compute_lipschitz_linear(layer.weight, 10)/L, 1)
class EmbeddingNetwork(nn.Module):
def __init__(self, in_d, hiddens_embedding=[50, 50, 50, 50], hiddens_integrand=[50, 50, 50, 50], out_made=1,
cond_in=0, act_func='ELU', device="cpu"):
super().__init__()
self.m_embeding = None
self.device = device
self.in_d = in_d
if cond_in > 0:
self.made = ConditionnalMADE(in_d, cond_in, hiddens_embedding, (in_d + cond_in) * (out_made), num_masks=1,
natural_ordering=True).to(device)
else:
self.made = MADE(in_d, hiddens_embedding, in_d * (out_made), num_masks=1, natural_ordering=True).to(device)
self.parallel_nets = IntegrandNetwork(in_d, 1 + out_made, hiddens_integrand, 1, act_func=act_func, device=device)
def to(self, device):
self.device = device
self.made.to(device)
self.parallel_nets.to(device)
return self
def make_embeding(self, x_made, context=None):
self.m_embeding = self.made.forward(x_made, context)
return self.m_embeding
def forward(self, x_t):
return self.parallel_nets.forward(x_t, self.m_embeding)
| 10,524 | 38.716981 | 129 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/models/UMNN/UMNNMAFFlow.py
|
import torch
import torch.nn as nn
from .UMNNMAF import EmbeddingNetwork, UMNNMAF
import numpy as np
import math
class ListModule(object):
def __init__(self, module, prefix, *args):
"""
The ListModule class is a container for multiple nn.Module.
        :nn.Module module: The parent module on which the list entries are registered
        :string prefix: The name prefix under which the entries are registered
        :list of nn.module args: Other modules to add in the list
"""
self.module = module
self.prefix = prefix
self.num_module = 0
for new_module in args:
self.append(new_module)
def append(self, new_module):
if not isinstance(new_module, nn.Module):
raise ValueError('Not a Module')
else:
self.module.add_module(self.prefix + str(self.num_module), new_module)
self.num_module += 1
def __len__(self):
return self.num_module
def __getitem__(self, i):
if i < 0 or i >= self.num_module:
raise IndexError('Out of bound')
return getattr(self.module, self.prefix + str(i))
class UMNNMAFFlow(nn.Module):
def __init__(self, nb_flow=1, nb_in=1, hidden_derivative=[50, 50, 50, 50], hidden_embedding=[50, 50, 50, 50],
embedding_s=20, nb_steps=50, act_func='ELU', solver="CC", cond_in=0, device="cpu"):
"""
UMNNMAFFlow class is a normalizing flow made of UMNNMAF blocks.
:int nb_flow: The number of components in the flow
:int nb_in: The size of the input dimension (data)
:list(int) hidden_derivative: The size of hidden layers in the integrand networks
:list(int) hidden_embedding: The size of hidden layers in the embedding networks
:int embedding_s: The size of the embedding
:int nb_steps: The number of integration steps (0 for random)
:string solver: The solver (CC or CCParallel)
        :int cond_in: The size of the conditioning variable
:string device: The device (cpu or gpu)
"""
super().__init__()
self.device = device
self.register_buffer("pi", torch.tensor(math.pi))
self.nets = ListModule(self, "Flow")
for i in range(nb_flow):
auto_net = EmbeddingNetwork(nb_in, hidden_embedding, hidden_derivative, embedding_s, act_func=act_func,
device=device, cond_in=cond_in).to(device)
model = UMNNMAF(auto_net, nb_in, nb_steps, device, solver=solver).to(device)
self.nets.append(model)
def to(self, device):
for net in self.nets:
net.to(device)
self.device = device
super().to(device)
return self
def forward(self, x, context=None):
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
x = net.forward(x, context=context)[:, inv_idx]
return x[:, inv_idx]
def invert(self, z, iter=10, context=None):
"""
From image to domain.
:param z: A tensor of noise.
        :param iter: The number of iterations (accuracy should be around 25/100**iter)
:param context: Conditioning variable
:return: Domain value
"""
inv_idx = torch.arange(z.size(1) - 1, -1, -1).long()
z = z[:, inv_idx]
for net_i in range(len(self.nets)-1, -1, -1):
z = self.nets[net_i].invert(z[:, inv_idx], iter, context=context)
return z
def compute_log_jac(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
log_jac += net.compute_log_jac(x, context=context)
x = net.forward(x, context=context)[:, inv_idx]
return log_jac
def compute_log_jac_bis(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
x, l = net.compute_log_jac_bis(x, context=context)
x = x[:, inv_idx]
log_jac += l
return x[:, inv_idx], log_jac
def compute_ll(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
z = net.forward(x, context=context)[:, inv_idx]
log_jac += net.compute_log_jac(x, context=context)
x = z
z = z[:, inv_idx]
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2).sum(1)
ll = log_jac.sum(1) + log_prob_gauss
return ll, z
def compute_ll_bis(self, x, context=None):
log_jac = 0.
inv_idx = torch.arange(x.size(1) - 1, -1, -1).long()
for net in self.nets:
log_jac += net.compute_log_jac(x, context=context)
x = net.forward(x, context=context)[:, inv_idx]
z = x[:, inv_idx]
log_prob_gauss = -.5 * (torch.log(self.pi * 2) + z ** 2)
ll = log_jac + log_prob_gauss
return ll, z
def compute_bpp(self, x, alpha=1e-6, context=None):
d = x.shape[1]
ll, z = self.compute_ll(x, context=context)
bpp = -ll / (d * np.log(2)) - np.log2(1 - 2 * alpha) + 8 \
+ 1 / d * (torch.log2(torch.sigmoid(x)) + torch.log2(1 - torch.sigmoid(x))).sum(1)
return bpp, ll, z
def set_steps_nb(self, nb_steps):
for net in self.nets:
net.set_steps_nb(nb_steps)
def compute_lipschitz(self, nb_iter=10):
L = 1.
for net in self.nets:
L *= net.compute_lipschitz(nb_iter)
return L
def force_lipschitz(self, L=1.5):
for net in self.nets:
net.force_lipschitz(L)
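# --- hedged usage sketch (not part of the original file): a small CPU flow on
# random data, checking shapes only; no training is performed.
if __name__ == '__main__':
    flow = UMNNMAFFlow(nb_flow=2, nb_in=3, hidden_derivative=[32, 32],
                       hidden_embedding=[32, 32], embedding_s=5, nb_steps=20,
                       device="cpu")
    x = torch.randn(4, 3)
    ll, z = flow.compute_ll(x)
    print(ll.shape, z.shape)  # torch.Size([4]) torch.Size([4, 3])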
| 5,656 | 36.217105 | 115 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/datasets/power.py
|
import numpy as np
import datasets
class POWER:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
trn, val, tst = load_data_normalised()
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def load_data():
return np.load(datasets.root + 'power/data.npy')
def load_data_split_with_noise():
rng = np.random.RandomState(42)
data = load_data()
rng.shuffle(data)
N = data.shape[0]
data = np.delete(data, 3, axis=1)
data = np.delete(data, 1, axis=1)
############################
# Add noise
############################
# global_intensity_noise = 0.1*rng.rand(N, 1)
voltage_noise = 0.01 * rng.rand(N, 1)
# grp_noise = 0.001*rng.rand(N, 1)
gap_noise = 0.001 * rng.rand(N, 1)
sm_noise = rng.rand(N, 3)
time_noise = np.zeros((N, 1))
# noise = np.hstack((gap_noise, grp_noise, voltage_noise, global_intensity_noise, sm_noise, time_noise))
# noise = np.hstack((gap_noise, grp_noise, voltage_noise, sm_noise, time_noise))
noise = np.hstack((gap_noise, voltage_noise, sm_noise, time_noise))
data = data + noise
N_test = int(0.1 * data.shape[0])
data_test = data[-N_test:]
data = data[0:-N_test]
N_validate = int(0.1 * data.shape[0])
data_validate = data[-N_validate:]
data_train = data[0:-N_validate]
return data_train, data_validate, data_test
def load_data_normalised():
data_train, data_validate, data_test = load_data_split_with_noise()
data = np.vstack((data_train, data_validate))
mu = data.mean(axis=0)
s = data.std(axis=0)
data_train = (data_train - mu) / s
data_validate = (data_validate - mu) / s
data_test = (data_test - mu) / s
return data_train, data_validate, data_test
| 1,940 | 24.88 | 108 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/datasets/download_datasets.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 19 15:58:53 2017
@author: Chin-Wei
# some code adapted from https://github.com/yburda/iwae/blob/master/download_mnist.py
LSUN
https://github.com/fyu/lsun
"""
import urllib
import pickle
import os
import struct
import numpy as np
import gzip
import time
import urllib.request
savedir = 'datasets/data'
mnist = True
cifar10 = False
omniglot = False
maf = False
class Progbar(object):
def __init__(self, target, width=30, verbose=1):
'''
@param target: total number of steps expected
'''
self.width = width
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
def update(self, current, values=[]):
'''
@param current: index of current step
@param values: list of tuples (name, value_for_last_step).
The progress bar will display averages for these values.
'''
for k, v in values:
if k not in self.sum_values:
self.sum_values[k] = [v * (current - self.seen_so_far), current - self.seen_so_far]
self.unique_values.append(k)
else:
self.sum_values[k][0] += v * (current - self.seen_so_far)
self.sum_values[k][1] += (current - self.seen_so_far)
self.seen_so_far = current
now = time.time()
if self.verbose == 1:
prev_total_width = self.total_width
sys.stdout.write("\b" * prev_total_width)
sys.stdout.write("\r")
numdigits = int(np.floor(np.log10(self.target))) + 1
barstr = '%%%dd/%%%dd [' % (numdigits, numdigits)
bar = barstr % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
sys.stdout.write(bar)
self.total_width = len(bar)
if current:
time_per_unit = (now - self.start) / current
else:
time_per_unit = 0
eta = time_per_unit * (self.target - current)
info = ''
if current < self.target:
info += ' - ETA: %ds' % eta
else:
info += ' - %ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
if type(self.sum_values[k]) is list:
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self.sum_values[k]
self.total_width += len(info)
if prev_total_width > self.total_width:
info += ((prev_total_width - self.total_width) * " ")
sys.stdout.write(info)
sys.stdout.flush()
if current >= self.target:
sys.stdout.write("\n")
if self.verbose == 2:
if current >= self.target:
info = '%ds' % (now - self.start)
for k in self.unique_values:
info += ' - %s:' % k
avg = self.sum_values[k][0] / max(1, self.sum_values[k][1])
if avg > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
sys.stdout.write(info + "\n")
def add(self, n, values=[]):
self.update(self.seen_so_far + n, values)
# mnist
def load_mnist_images_np(imgs_filename):
with open(imgs_filename, 'rb') as f:
f.seek(4)
nimages, rows, cols = struct.unpack('>iii', f.read(12))
dim = rows * cols
images = np.fromfile(f, dtype=np.dtype(np.ubyte))
images = (images / 255.0).astype('float32').reshape((nimages, dim))
return images
# cifar10
from six.moves.urllib.request import FancyURLopener
import tarfile
import sys
class ParanoidURLopener(FancyURLopener):
def http_error_default(self, url, fp, errcode, errmsg, headers):
raise Exception('URL fetch failure on {}: {} -- {}'.format(url, errcode, errmsg))
def get_file(fname, origin, untar=False):
datadir_base = os.path.expanduser(os.path.join('~', '.keras'))
if not os.access(datadir_base, os.W_OK):
datadir_base = os.path.join('/tmp', '.keras')
datadir = os.path.join(datadir_base, 'datasets')
if not os.path.exists(datadir):
os.makedirs(datadir)
if untar:
untar_fpath = os.path.join(datadir, fname)
fpath = untar_fpath + '.tar.gz'
else:
fpath = os.path.join(datadir, fname)
if not os.path.exists(fpath):
print('Downloading data from', origin)
global progbar
progbar = None
def dl_progress(count, block_size, total_size):
global progbar
if progbar is None:
progbar = Progbar(total_size)
else:
progbar.update(count * block_size)
ParanoidURLopener().retrieve(origin, fpath, dl_progress)
progbar = None
if untar:
if not os.path.exists(untar_fpath):
print('Untaring file...')
tfile = tarfile.open(fpath, 'r:gz')
tfile.extractall(path=datadir)
tfile.close()
return untar_fpath
return fpath
def load_batch(fpath, label_key='labels'):
    with open(fpath, 'rb') as f:
        if sys.version_info < (3,):
            d = pickle.load(f)
        else:
            d = pickle.load(f, encoding="bytes")
    # decode utf8: with encoding="bytes" the keys are byte strings; the original
    # deleted and re-inserted keys while iterating, which raises a RuntimeError
    # in Python 3 and never actually decoded anything
    d = {k.decode('utf8') if isinstance(k, bytes) else k: v for k, v in d.items()}
data = d["data"]
labels = d[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
return data, labels
def load_cifar10():
dirname = "cifar-10-batches-py"
origin = "http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
path = get_file(dirname, origin=origin, untar=True)
print(path)
nb_train_samples = 50000
X_train = np.zeros((nb_train_samples, 3, 32, 32), dtype="uint8")
y_train = np.zeros((nb_train_samples,), dtype="uint8")
for i in range(1, 6):
fpath = os.path.join(path, 'data_batch_' + str(i))
print(fpath)
data, labels = load_batch(fpath)
X_train[(i - 1) * 10000:i * 10000, :, :, :] = data
y_train[(i - 1) * 10000:i * 10000] = labels
fpath = os.path.join(path, 'test_batch')
X_test, y_test = load_batch(fpath)
y_train = np.reshape(y_train, (len(y_train), 1))
y_test = np.reshape(y_test, (len(y_test), 1))
return (X_train, y_train), (X_test, y_test)
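# --- hedged usage sketch (not part of the original script): exercise Progbar
# standalone; assumes a terminal that honors carriage returns and backspaces.
def _demo_progbar(total=50):
    pb = Progbar(total)
    for i in range(total):
        time.sleep(0.01)
        pb.update(i + 1, values=[('loss', 1.0 / (i + 1))])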
if __name__ == '__main__':
if not os.path.exists(savedir):
os.makedirs(savedir)
if mnist:
print('dynamically binarized mnist')
mnist_filenames = ['train-images-idx3-ubyte', 't10k-images-idx3-ubyte']
for filename in mnist_filenames:
local_filename = os.path.join(savedir, filename)
urllib.request.urlretrieve("http://yann.lecun.com/exdb/mnist/{}.gz".format(filename),
local_filename + '.gz')
with gzip.open(local_filename + '.gz', 'rb') as f:
file_content = f.read()
with open(local_filename, 'wb') as f:
f.write(file_content)
np.savetxt(local_filename, load_mnist_images_np(local_filename))
os.remove(local_filename + '.gz')
print('statically binarized mnist')
subdatasets = ['train', 'valid', 'test']
for subdataset in subdatasets:
filename = 'binarized_mnist_{}.amat'.format(subdataset)
url = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_{}.amat'.format(
subdataset)
local_filename = os.path.join(savedir, filename)
urllib.request.urlretrieve(url, local_filename)
if cifar10:
(X_train, y_train), (X_test, y_test) = load_cifar10()
        pickle.dump((X_train, y_train, X_test, y_test),
                    open('{}/cifar10.pkl'.format(savedir), 'wb'))
if omniglot:
url = 'https://github.com/yburda/iwae/raw/master/datasets/OMNIGLOT/chardata.mat'
filename = 'omniglot.amat'
local_filename = os.path.join(savedir, filename)
urllib.request.urlretrieve(url, local_filename)
if maf:
savedir = 'datasets'
url = 'https://zenodo.org/record/1161203/files/data.tar.gz'
local_filename = os.path.join(savedir, 'data.tar.gz')
urllib.request.urlretrieve(url, local_filename)
tar = tarfile.open(local_filename, "r:gz")
tar.extractall(savedir)
tar.close()
os.remove(local_filename)
| 9,226 | 31.60424 | 119 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/datasets/hepmass.py
|
import pandas as pd
import numpy as np
from collections import Counter
from os.path import join
import datasets
class HEPMASS:
"""
The HEPMASS data set.
http://archive.ics.uci.edu/ml/datasets/HEPMASS
"""
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
path = datasets.root + 'hepmass/'
trn, val, tst = load_data_no_discrete_normalised_as_array(path)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def load_data(path):
data_train = pd.read_csv(filepath_or_buffer=join(path, "1000_train.csv"), index_col=False)
data_test = pd.read_csv(filepath_or_buffer=join(path, "1000_test.csv"), index_col=False)
return data_train, data_test
def load_data_no_discrete(path):
"""
Loads the positive class examples from the first 10 percent of the dataset.
"""
data_train, data_test = load_data(path)
# Gets rid of any background noise examples i.e. class label 0.
data_train = data_train[data_train[data_train.columns[0]] == 1]
data_train = data_train.drop(data_train.columns[0], axis=1)
data_test = data_test[data_test[data_test.columns[0]] == 1]
data_test = data_test.drop(data_test.columns[0], axis=1)
# Because the data set is messed up!
data_test = data_test.drop(data_test.columns[-1], axis=1)
return data_train, data_test
def load_data_no_discrete_normalised(path):
data_train, data_test = load_data_no_discrete(path)
mu = data_train.mean()
s = data_train.std()
data_train = (data_train - mu) / s
data_test = (data_test - mu) / s
return data_train, data_test
def load_data_no_discrete_normalised_as_array(path):
data_train, data_test = load_data_no_discrete_normalised(path)
    # DataFrame.as_matrix was removed in pandas 1.0; to_numpy is the replacement
    data_train, data_test = data_train.to_numpy(), data_test.to_numpy()
i = 0
    # Remove any features that have too many re-occurring real values.
    # (Kept as in the original MAF preprocessing, which checks the count of the
    # smallest feature value rather than the true maximum count.)
features_to_remove = []
for feature in data_train.T:
c = Counter(feature)
max_count = np.array([v for k, v in sorted(c.items())])[0]
if max_count > 5:
features_to_remove.append(i)
i += 1
data_train = data_train[:, np.array([i for i in range(data_train.shape[1]) if i not in features_to_remove])]
data_test = data_test[:, np.array([i for i in range(data_test.shape[1]) if i not in features_to_remove])]
N = data_train.shape[0]
N_validate = int(N * 0.1)
data_validate = data_train[-N_validate:]
data_train = data_train[0:-N_validate]
return data_train, data_validate, data_test
| 2,730 | 28.365591 | 112 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/datasets/gas.py
|
import pandas as pd
import numpy as np
import datasets
class GAS:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = datasets.root + 'gas/ethylene_CO.pickle'
trn, val, tst = load_data_and_clean_and_split(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def load_data(file):
data = pd.read_pickle(file)
# data = pd.read_pickle(file).sample(frac=0.25)
# data.to_pickle(file)
data.drop("Meth", axis=1, inplace=True)
data.drop("Eth", axis=1, inplace=True)
data.drop("Time", axis=1, inplace=True)
return data
def get_correlation_numbers(data):
C = data.corr()
A = C > 0.98
    # DataFrame.as_matrix was removed in pandas 1.0; use to_numpy instead
    B = A.to_numpy().sum(axis=1)
return B
def load_data_and_clean(file):
data = load_data(file)
B = get_correlation_numbers(data)
while np.any(B > 1):
col_to_remove = np.where(B > 1)[0][0]
col_name = data.columns[col_to_remove]
data.drop(col_name, axis=1, inplace=True)
B = get_correlation_numbers(data)
# print(data.corr())
data = (data - data.mean()) / data.std()
return data
def load_data_and_clean_and_split(file):
    data = load_data_and_clean(file).to_numpy()
N_test = int(0.1 * data.shape[0])
data_test = data[-N_test:]
data_train = data[0:-N_test]
N_validate = int(0.1 * data_train.shape[0])
data_validate = data_train[-N_validate:]
data_train = data_train[0:-N_validate]
return data_train, data_validate, data_test
| 1,672 | 21.917808 | 59 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/datasets/bsds300.py
|
import numpy as np
import h5py
import datasets
class BSDS300:
"""
A dataset of patches from BSDS300.
"""
class Data:
"""
Constructs the dataset.
"""
def __init__(self, data):
self.x = data[:]
self.N = self.x.shape[0]
def __init__(self):
# load dataset
f = h5py.File(datasets.root + 'BSDS300/BSDS300.hdf5', 'r')
self.trn = self.Data(f['train'])
self.val = self.Data(f['validation'])
self.tst = self.Data(f['test'])
self.n_dims = self.trn.x.shape[1]
self.image_size = [int(np.sqrt(self.n_dims + 1))] * 2
f.close()
| 663 | 17.971429 | 66 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/datasets/miniboone.py
|
import numpy as np
import datasets
class MINIBOONE:
class Data:
def __init__(self, data):
self.x = data.astype(np.float32)
self.N = self.x.shape[0]
def __init__(self):
file = datasets.root + 'miniboone/data.npy'
trn, val, tst = load_data_normalised(file)
self.trn = self.Data(trn)
self.val = self.Data(val)
self.tst = self.Data(tst)
self.n_dims = self.trn.x.shape[1]
def load_data(root_path):
# NOTE: To remember how the pre-processing was done.
# data = pd.read_csv(root_path, names=[str(x) for x in range(50)], delim_whitespace=True)
# print data.head()
# data = data.as_matrix()
# # Remove some random outliers
# indices = (data[:, 0] < -100)
# data = data[~indices]
#
# i = 0
# # Remove any features that have too many re-occuring real values.
# features_to_remove = []
# for feature in data.T:
# c = Counter(feature)
# max_count = np.array([v for k, v in sorted(c.iteritems())])[0]
# if max_count > 5:
# features_to_remove.append(i)
# i += 1
# data = data[:, np.array([i for i in range(data.shape[1]) if i not in features_to_remove])]
# np.save("~/data/miniboone/data.npy", data)
data = np.load(root_path)
N_test = int(0.1 * data.shape[0])
data_test = data[-N_test:]
data = data[0:-N_test]
N_validate = int(0.1 * data.shape[0])
data_validate = data[-N_validate:]
data_train = data[0:-N_validate]
return data_train, data_validate, data_test
def load_data_normalised(root_path):
data_train, data_validate, data_test = load_data(root_path)
data = np.vstack((data_train, data_validate))
mu = data.mean(axis=0)
s = data.std(axis=0)
data_train = (data_train - mu) / s
data_validate = (data_validate - mu) / s
data_test = (data_test - mu) / s
return data_train, data_validate, data_test
| 1,955 | 26.942857 | 96 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/UMNN/datasets/__init__.py
|
root = 'datasets/data/'
from .power import POWER
from .gas import GAS
from .hepmass import HEPMASS
from .miniboone import MINIBOONE
from .bsds300 import BSDS300
| 162 | 19.375 | 32 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/adaptive_stft_utils/operators.py
|
import torch.autograd
import torch
import torch.nn.functional as F
def dithering_int(n):
    # stochastic rounding: round up with probability equal to the fractional part
    if n == int(n):
        return int(n)
    return int(torch.bernoulli(torch.as_tensor(n - int(n), dtype=torch.float32)) + int(n))
class SignPassGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
return grad_output
class Sign(torch.autograd.Function):
@staticmethod
def forward(ctx, n):
return torch.sign(n)
@staticmethod
def backward(ctx, grad_output):
return grad_output * 1e-3
class InvSign(torch.autograd.Function):
@staticmethod
def forward(ctx, n):
return torch.sign(n)
@staticmethod
def backward(ctx, grad_output):
return -grad_output * 1e-3
def clip_tensor_norm(parameters, max_norm, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
max_norm = float(max_norm)
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].device
total_norm = torch.norm(torch.stack([torch.norm(p.detach(), norm_type).to(
device) for p in parameters]), norm_type).detach()
def clamp(p):
clamped = torch.clamp(p, min=-total_norm * max_norm, max=total_norm * max_norm)
return clamped + 1e-4 * (p - clamped)
return [
clamp(p)
for p in parameters
]
class LimitGradient(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
grad_output = clip_tensor_norm(grad_output, max_norm=1.0, norm_type=2)[0]
return grad_output
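# --- hedged usage sketch (not part of the original file): SignPassGrad is a
# straight-through estimator, so the backward pass forwards gradients unchanged.
if __name__ == '__main__':
    x = torch.randn(4, requires_grad=True)
    SignPassGrad.apply(x).sum().backward()
    print(x.grad)  # all ones: the gradient of sum passes straight through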
| 1,729 | 23.027778 | 87 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/adaptive_stft_utils/losses.py
|
import torch.autograd
import torch
import torch.nn.functional as F
from .operators import clip_tensor_norm
def kurtosis(rfft_magnitudes_sq):
epsilon = 1e-7
max_norm = 0.1
kur_part = [
torch.sum(torch.pow(a, 2)) /
(torch.pow(torch.sum(a), 2).unsqueeze(-1) + epsilon)
for a in rfft_magnitudes_sq
]
n_wnd = len(rfft_magnitudes_sq)
assert n_wnd > 0
catted = torch.cat(clip_tensor_norm(kur_part, max_norm=max_norm, norm_type=2)) / max_norm
kur = torch.sum(catted) / n_wnd
return kur
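# --- hedged usage sketch (not part of the original file): one tensor of
# squared rFFT magnitudes per window; windows of different lengths are fine.
if __name__ == '__main__':
    mags = [torch.rand(n) for n in (17, 33, 65, 129)]
    print(kurtosis(mags))  # scalar sparsity score; larger = more peaked spectra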
| 541 | 24.809524 | 93 |
py
|
STFTgrad
|
STFTgrad-main/adaptiveSTFT/adaptive_stft_utils/mappings.py
|
import math
import sys
import pathlib
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.insert(0, str(pathlib.Path(__file__).parent.parent.parent.absolute()))
from UMNN.models.UMNN import MonotonicNN
# Monotonically increasing mapping
class IdxToWindow(nn.Module):
def __init__(self, signal_len, num_windows=80, baseline_mapping_trick=True):
super(IdxToWindow, self).__init__()
self.signal_len = signal_len
self.num_windows = num_windows
self.baseline_mapping_trick = baseline_mapping_trick
self.slope = nn.Parameter(torch.tensor(
signal_len / num_windows, dtype=torch.float32, requires_grad=True, device="cuda"))
self.overlap_net = nn.Sequential(
nn.Linear(1, 64),
nn.LeakyReLU(),
nn.Linear(64, 64),
nn.LeakyReLU(),
nn.Linear(64, 1),
nn.Sigmoid()
)
self.bias = nn.Parameter(torch.tensor(
1e-2, dtype=torch.float32, requires_grad=True, device="cuda"))
self.model_monotonic = MonotonicNN(
2, [128, 128, 128], nb_steps=768, dev="cuda").cuda()
self.signal_mid_point = nn.Parameter(torch.tensor(
signal_len / 2.0, dtype=torch.float32, requires_grad=True, device="cuda"))
def forward(self, idx):
assert len(idx.shape) == 1
# transform window idx
# scale down by 10
rescale = .1
in_var = (idx * rescale + self.bias).view(idx.size(0), 1)
stem = (self.model_monotonic(in_var, in_var).flatten() / rescale)
        # advance by at least 32 samples per window
if self.baseline_mapping_trick:
baseline_mapping = 32 * idx
else:
baseline_mapping = 0
# convert window idx to sample idx
x_i = (stem * self.slope + baseline_mapping) + self.signal_mid_point
perc = self.overlap_net(idx.unsqueeze(-1) / self.signal_len * 2 - 1).flatten()
return (x_i, perc)
def make_find_window_configs(idx_to_window: IdxToWindow, last_sample: int):
"""
Creates a function which scans the mapping function to generate window
positions and overlaps ranging from just before the first sample to
just after the last sample.
"""
prev_cached_i = 0
def find_window_configs():
nonlocal prev_cached_i
# evaluate the window generator until we hit boundary on both sides,
# but keep one extra element on both sides past boundary
eval_cache = {}
def fast_idx_to_window(i):
batch_size = 16
idx = i // batch_size
arr = eval_cache.get(idx)
if arr is not None:
return (arr[0][i - idx * batch_size], arr[1][i - idx * batch_size])
arr = idx_to_window(torch.arange(start=idx * batch_size, end=idx * batch_size + batch_size,
step=1, dtype=torch.float32, device="cuda", requires_grad=False))
eval_cache[idx] = arr
return (arr[0][i - idx * batch_size], arr[1][i - idx * batch_size])
window_configs = []
i = prev_cached_i
w, p = fast_idx_to_window(i)
if w >= last_sample:
path = 0
# move left if too big
while w >= last_sample:
i = i - 1
w, p = fast_idx_to_window(i)
prev_cached_i = i
window_configs.append(fast_idx_to_window(i + 1))
# collect all items between last_sample and 0
while w >= 0:
window_configs.append((w, p))
i = i - 1
w, p = fast_idx_to_window(i)
window_configs.append((w, p))
window_configs.reverse()
elif w <= 0:
path = 1
# move right if too small
while w <= 0:
i = i + 1
w, p = fast_idx_to_window(i)
window_configs.append(fast_idx_to_window(i - 1))
prev_cached_i = i
# collect all items between 0 and last_sample
while w <= last_sample:
window_configs.append((w, p))
i = i + 1
w, p = fast_idx_to_window(i)
window_configs.append((w, p))
else:
path = 2
# w was in range
right_list = []
while w <= last_sample:
right_list.append((w, p))
i = i + 1
w, p = fast_idx_to_window(i)
right_list.append((w, p))
# move left from zero, to cover the starting regions
i = prev_cached_i - 1
w, p = fast_idx_to_window(i)
left_list = []
while w >= 0:
left_list.append((w, p))
i = i - 1
w, p = fast_idx_to_window(i)
left_list.append((w, p))
left_list.reverse()
window_configs = left_list + right_list
# filter out windows that are too small
filt_window_configs = []
prev_window_sample = -math.inf
assert window_configs[0][0] < 0, f"{i} {path} {window_configs}"
assert window_configs[-1][0] > last_sample, f"{i} {path} {window_configs}"
# min window size is 1
for i in range(len(window_configs)):
if i > 0:
if not (window_configs[i][0] > window_configs[i - 1][0]):
if path == 2:
path_desc = f"({(len(left_list), len(right_list))})" # type: ignore
else:
path_desc = None
assert False, f"path: {path} {path_desc}, i: {i}, {window_configs}, {window_configs[i][0]} > {window_configs[i - 1][0]}"
if window_configs[i][0] - prev_window_sample > 1:
filt_window_configs.append(window_configs[i])
prev_window_sample = window_configs[i][0]
assert len(filt_window_configs) > 0, f"{len(window_configs)}, path: {path}, prev_cached_i: {prev_cached_i}, {fast_idx_to_window(prev_cached_i)}, {fast_idx_to_window(prev_cached_i - 1)}, {idx_to_window.slope}"
assert filt_window_configs[0][0] < 0, f"{i} {path} {filt_window_configs}"
assert filt_window_configs[-1][0] > last_sample, f"{i} {path} {filt_window_configs}"
assert len(filt_window_configs) > 1, f"{filt_window_configs}, slope: {idx_to_window.slope.item()}"
return filt_window_configs
return find_window_configs
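# --- hedged usage sketch (not part of the original file): IdxToWindow
# hard-codes CUDA tensors internally, so the demo is guarded on GPU presence.
if __name__ == '__main__':
    if torch.cuda.is_available():
        i2w = IdxToWindow(signal_len=16000, num_windows=80).cuda()
        find_configs = make_find_window_configs(i2w, last_sample=16000 - 1)
        configs = find_configs()
        print(len(configs), float(configs[0][0]), float(configs[-1][0]))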
| 6,507 | 40.452229 | 216 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/main.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Run this script to reproduce all the numerical experiments obtained in Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
Please note that the training of the quadratic model may take several hours.
"""
import zipfile
import io
import requests
from experiments import (load_dataset, train_quadratic_model, train_constant_model,
experiment1, experiment2)
if __name__ == "__main__":
print("\n \n *** Downloading dataset from Zenodo. *** \nPlease wait...")
# download the dataset from Zenodo, DOI:10.5281/zenodo.7267054
zip_file_url = 'https://zenodo.org/record/7267054/files/Figures.zip?download=1'
r = requests.get(zip_file_url, stream=True)
with zipfile.ZipFile(io.BytesIO(r.content)) as z:
z.extractall()
print("Dataset downloaded.")
noise_level = 0.05
# load the training set
dataset = load_dataset(noise_level)
# train the quadratic model
A = train_quadratic_model(dataset)
# train the constant model
best_constant = train_constant_model(dataset)
print("\n \n *** Starting experiment 1. ***")
experiment1(A, noise_level)
print("\n \n *** Starting experiment 2. ***")
MSE_alpha, MSE_u, MSE_alpha_constants, MSE_u_constants = experiment2(A, best_constant,
noise_level)
| 2,885 | 38 | 90 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/structures.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This file contains several useful functions to reproduce the experiments in Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import numpy as np
import scipy.sparse as sp
def grad_x(p):
diag = np.ones(p)
diag[-1] = 0
diag = np.tile(diag, p)
Dx = sp.spdiags([-diag, [0]+list(diag[:-1])], [0, 1], p**2, p**2)
return Dx
def grad_y(p):
diag = np.ones(p**2)
    diag[-p:] = 0
up_diag = np.ones(p**2)
    up_diag[:p] = 0
Dy = sp.spdiags([-diag, up_diag], [0, p], p**2, p**2)
return Dy
def grad(psi, Dx, Dy, n):
sig_out = np.zeros((n, 2))
sig_out[:, 0] = Dx @ psi
sig_out[:, 1] = Dy @ psi
return sig_out
def div(sig, M1, M2):
'''
    M1 = -Dx* and M2 = -Dy* (the negative adjoints of the difference operators)
'''
div_sig_out = M1 @ sig[:, 0] + M2 @ sig[:, 1]
return div_sig_out
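# A small numerical self-check (not part of the original code): since
# M1 = -Dx^T and M2 = -Dy^T, div is the negative adjoint of grad, i.e.
# <grad(psi), sig> = -<psi, div(sig)>. The helper below verifies this on a
# random small grid, reusing only the functions defined above.
def _check_adjointness(p=4, seed=0):
    rng = np.random.default_rng(seed)
    n = p**2
    Dx, Dy = grad_x(p), grad_y(p)
    M1, M2 = -Dx.T, -Dy.T
    psi = rng.standard_normal(n)
    sig = rng.standard_normal((n, 2))
    lhs = np.sum(grad(psi, Dx, Dy, n)*sig)
    rhs = -np.sum(psi*div(sig, M1, M2))
    assert np.isclose(lhs, rhs), (lhs, rhs)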
def grad_m(Psi, Dx, Dy, n, N):
'''
Computes the gradient of every Psi_i = Psi[:,i]
'''
Sig_out = np.zeros((n, N, 2))
Sig_out[:, :, 0] = Dx @ Psi
Sig_out[:, :, 1] = Dy @ Psi
return Sig_out
def div_m(Sig, M1, M2):
'''
    M1 = -Dx* and M2 = -Dy* (the negative adjoints of the difference operators)
'''
DivSig_out = M1 @ Sig[:, :, 0] + M2 @ Sig[:, :, 1]
return DivSig_out
def tv(psi, Dx, Dy, n):
return np.sum(np.linalg.norm(grad(psi, Dx, Dy, n), axis=1))
def TV(Psi, Dx, Dy, n, N):
return np.sum(np.linalg.norm(grad_m(Psi, Dx, Dy, n, N), axis=2), axis=0)
# Proximity operators
def proj_pos(A):
d, O = np.linalg.eigh(A)
return [email protected](np.maximum(d, 0))@O.T
def prox_fidelity(tau, x, f):
return (x+tau*f)/(1+tau)
def proj_12(sig):
sig_out = np.copy(sig)
norms = np.linalg.norm(sig, axis=1)
norms = np.repeat(norms[:, np.newaxis], 2, axis=1)
sig_out[norms > 1] = np.divide(sig_out[norms > 1], norms[norms > 1])
return sig_out
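# Usage sketch (illustrative, not in the original file): proj_12 projects each
# row of sig onto the closed unit Euclidean ball, leaving short rows untouched.
def _demo_proj_12():
    sig = np.array([[3.0, 4.0],    # norm 5   -> rescaled to norm 1
                    [0.3, 0.4]])   # norm 0.5 -> unchanged
    out = proj_12(sig)
    assert np.allclose(np.linalg.norm(out, axis=1), [1.0, 0.5])
    return out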
| 3,390 | 23.751825 | 93 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/experiments.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This file contains all the experiments in Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import random
import numpy as np
# plots and images
from matplotlib import rc
from PIL import Image
import structures as st
from plots import plot_exp1
from denoising import denoise, denoise_large
from data import apply_noise, create_sample
from train_quadratic_model import train_qm
from train_constant_model import train_cm
from single_patch import single_patch_best_par
def load_dataset(noise_level):
print("\n \nLoading the dataset...")
np.random.seed(1)
dataset = create_sample(noise_level, "train_set")
return dataset
def train_quadratic_model(dataset, maxit=25000):
# train a quadratic model
lam = 50
A, _Sig, _Du_list = train_qm(dataset, lam, maxit)
return A
def train_constant_model(dataset, maxit=20000):
# Train a constant model
lam = 50
print("\n \nExtracting 1000 patches from training set...")
dataset_small = random.sample(dataset, 1000)
best_constant, _Sig, _Du_list = train_cm(dataset_small, lam, maxit)
return best_constant
def experiment1(A, noise_level):
np.random.seed(1)
size_patches = 16
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 17})
rc('text', usetex=True)
objects = []
for test in range(6):
print(f"\n## Starting image {test+1}")
img = Image.open(f"Figures/Figures_exp1/im{test+1}.png").convert('L')
sxx, syy = np.shape(img)
sx = int(((sxx/1.3)//size_patches)*size_patches)
sy = int(((syy/1.3)//size_patches)*size_patches)
true_image = 255-np.array(img.resize((sx, sy)), dtype=np.float64)
true_image /= 255
print(f"picture size is {sx} times {sy}")
noisy_image = apply_noise(true_image, noise_level)
denoised_image, params = denoise_large(true_image, noisy_image, A, size_patches)
objects.append([true_image, noisy_image, denoised_image, params])
plot_exp1(objects)
print("Experiment 1 ended. Results have been saved as experiment1_comparisons.pdf\n")
def experiment2(A, best_constant, noise_level):
np.random.seed(1)
# structures
Dx = st.grad_x(16)
Dy = st.grad_y(16)
M1 = -Dx.T
M2 = -Dy.T
print("\nLoading test set...")
test_set = create_sample(noise_level, "test_set", size_patches=16)
test_set = random.sample(test_set, 200)
print("Test set reduced to 200.")
consts = np.logspace(-4, -1, 8)
consts = np.append(consts, best_constant)
N = len(test_set)
MSE_alpha = 0
MSE_alpha_constants = np.zeros(len(consts))
MSE_u = 0
MSE_u_constants = np.zeros(len(consts))
count = 1
for (true_patch, noisy_patch) in test_set:
if count % 50 == 1:
print(f"\n ## Done {count} patches. {N-count} remain...\n")
# predict parameter with quadratic model
patch_noisy_b = np.append(noisy_patch, 1)
a_learned = patch_noisy_b.T@A@patch_noisy_b
# compute best parameter
a, sig, _Du_list = single_patch_best_par(true_patch, noisy_patch, 10, maxit=50000,
show_details=False)
# update mean squared error for the parameter
MSE_alpha += (a-a_learned)**2/N
if count % 50 == 1:
print(f"MSE_alpha quadratic model \n Equal to: {np.round(MSE_alpha*N/count,7)}")
for i, const in enumerate(consts):
            MSE_alpha_constants[i] += (a-const)**2/N
if count % 50 == 1:
print(f"MSE_alpha constant parameter {const}\n \
Equal to: {np.round(MSE_alpha_constants[i]*N/count,7)}")
        # compute approximate solution w.r.t. the best parameter
denoised_patch = st.div(sig, M1, M2)+noisy_patch
# update mean squared error for the image
MSE_u += np.linalg.norm(true_patch-denoised_patch)**2/N
if count % 50 == 1:
print(f"\nMSE_u quadratic model \n Equal to: {np.round(MSE_u*N/count,4)}")
# denoise with respect to constant (non-adaptive) parameters
for i, const in enumerate(consts):
denoised_patch, _warning = denoise(noisy_patch, const, maxit=100000)
MSE_u_constants[i] += np.linalg.norm(true_patch-denoised_patch)**2/N
if count % 50 == 1:
print(f"MSE_u constant parameter {const}\n \
Equal to: {np.round(MSE_u_constants[i]*N/count,4)}")
count += 1
return MSE_alpha, MSE_u, MSE_alpha_constants, MSE_u_constants
| 6,124 | 32.108108 | 94 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/train_quadratic_model.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This file contains the training function for the quadratic model.
For details and references, see Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import numpy as np
import structures as st
from plots import compare_results, plot_D
def D_func_qm(Sig, Sig_old, A, A_old, U, Dx, Dy, M1, M2, Fs, Fs_b, lam, n, N):
piece_1 = 0
piece_2 = 0
piece_1 = 1/N*np.sum(np.multiply(st.div_m(Sig_old, M1, M2)+Fs,
st.div_m(Sig_old, M1, M2)-st.div_m(Sig, M1, M2)))
piece_2 = 1/N*np.multiply(st.TV(U, Dx, Dy, n, N)[np.newaxis, :], Fs_b)@Fs_b.T
piece_2 = np.sum(np.multiply(piece_2, A_old-A))
return piece_1+piece_2-lam/2*np.sum(np.square(A_old-A))
def step_size_qm(Sig, Sig_old, A, A_old, Du, lam, N):
'''
The Lipschitz constant can be estimated by 8/N.
'''
dist = np.sum(np.square(Sig-Sig_old))+np.sum(np.square(A-A_old))
return min(1, N*(Du+lam/2*np.sum(np.square(A-A_old)))/(8*dist))
def train_qm(dataset, lam, maxit):
print("\n *** Starting training quadratic model. Info displayed every 30 iterations. *** \
\n ******* Note that this phase may take several hours *******")
# structure
n = len(dataset[0][0])
size_patches = int(np.sqrt(n))
N = len(dataset) # number of patches
Dx = st.grad_x(size_patches)
Dy = st.grad_y(size_patches)
M1 = -Dx.T
M2 = -Dy.T
# true and noisy images as numpy array for speed
U = np.zeros((n, N))
Fs = np.zeros((n, N))
for k in range(N):
U[:, k] = dataset[k][0]
Fs[:, k] = dataset[k][1]
# Add bias term to noisy images for flexibility
Fs_b = np.vstack([Fs, np.ones(N)])
# initializing
Sig = np.zeros((n, N, 2))
A = np.zeros((n+1, n+1))
Du_list = []
D_u = 1
it = 0
while D_u > 1e-4 and it <= maxit:
A_old = np.copy(A)
Sig_old = np.copy(Sig)
# partial update on A
pars = st.TV(U, Dx, Dy, n, N)-st.TV(st.div_m(Sig, M1, M2)+Fs, Dx, Dy, n, N)
temp = np.multiply(pars[np.newaxis, :], Fs_b)@Fs_b.T
A = st.proj_pos(A-1/(N*lam)*temp)
# partial update on Sig
Sig = st.grad_m(st.div_m(Sig, M1, M2)+Fs, Dx, Dy, n, N)
Norms = np.linalg.norm(Sig, axis=2)
Norms = np.repeat(Norms[:, :, np.newaxis], 2, axis=2)
Sig[Norms > 0] = np.divide(Sig[Norms > 0], Norms[Norms > 0]) # normalized
for i in range(N):
Sig[:, i, :] = Fs_b[:, i].T@A@Fs_b[:, i]*Sig[:, i, :]
# step size
D_u = D_func_qm(Sig, Sig_old, A, A_old, U, Dx, Dy, M1, M2, Fs, Fs_b, lam, n, N)
theta = step_size_qm(Sig, Sig_old, A, A_old, D_u, lam, N)
Du_list.append(D_u)
# full update on A and Sig
A = A_old+theta*(A-A_old)
Sig = Sig_old+theta*(Sig-Sig_old)
it += 1
if it % 200 == 100:
print(f'\n ########## Iteration: {it}')
print(f'step size = {theta}')
print(f'D(u^k) = {D_u}')
print(f'|A(k+1)-A(k)| = {np.linalg.norm(A-A_old)}')
print('Plotting 5 patches...')
compare_results(Sig, dataset, int(min(5, N)))
print('Plotted.')
# plotting
plot_D(Du_list)
if it % 30 == 5 and it % 200 != 5:
print(f'\n ########## Iteration: {it}')
print(f'step size = {theta}')
print(f'D(u^k) = {D_u}')
print(f'|A(k+1)-A(k)| = {np.linalg.norm(A-A_old)}')
print(f'\n ########## Final iteration: {it}')
print(f'Final residual D(u^k) = {D_u}')
print('Plotting 5 patches...')
compare_results(Sig, dataset, int(min(5, N)))
print('Plotting the residual as a function of the iterations...')
# plotting
plot_D(Du_list)
print('Residual as a function of the iteration plotted and saved as du.pdf')
return A, Sig, Du_list
| 5,435 | 34.298701 | 94 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/single_patch.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This file contains the function that allows us to find the best parameter for one figure.
For details and references, see Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import numpy as np
import structures as st
def D_func_single_patch(sig, sig_old, a, a_old, u, Dx, Dy, M1, M2, f, lam, n):
piece_1 = 0
piece_2 = 0
piece_1 = np.sum(np.multiply(st.div(sig_old, M1, M2)+f,
st.div(sig_old, M1, M2)-st.div(sig, M1, M2)))
piece_2 = st.tv(u, Dx, Dy, n)*(a_old-a)
return piece_1+piece_2-lam/2*(a-a_old)**2
def step_size_single_patch(sig, sig_old, a, a_old, Du, lam):
'''
The Lipschitz constant can be estimated by 8.
'''
dist = np.sum(np.square(sig-sig_old))+(a-a_old)**2
return min(1, (Du + lam/2*(a_old-a)**2)/(8*dist))
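# The constant 8 above is the usual bound ||grad||^2 <= 8 for the forward-difference
# gradient with unit step size. A quick numerical check (not in the original),
# estimating the largest eigenvalue of grad^T grad = -div o grad by power iteration:
def _estimate_grad_norm_sq(p=16, iters=200, seed=0):
    rng = np.random.default_rng(seed)
    n = p**2
    Dx, Dy = st.grad_x(p), st.grad_y(p)
    M1, M2 = -Dx.T, -Dy.T
    x = rng.standard_normal(n)
    for _ in range(iters):
        y = -st.div(st.grad(x, Dx, Dy, n), M1, M2)   # apply grad^T grad
        x = y/np.linalg.norm(y)
    return x@(-st.div(st.grad(x, Dx, Dy, n), M1, M2))  # Rayleigh quotient, below 8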
def single_patch_best_par(true_image, noisy_image, lam, maxit, show_details=True):
# structure
n = len(noisy_image)
size_patches = int(np.sqrt(n))
Dx = st.grad_x(size_patches)
Dy = st.grad_y(size_patches)
M1 = -Dx.T
M2 = -Dy.T
u = np.copy(true_image)
f = np.copy(noisy_image)
# initializing
sig = np.zeros((n, 2))
a = 0
Du_list = []
for it in range(maxit):
a_old = a
sig_old = np.copy(sig)
# partial update on A
par = st.tv(u, Dx, Dy, n)-st.tv(st.div(sig, M1, M2)+f, Dx, Dy, n)
a = np.maximum(a-par/lam, 0)
# partial update on Sig
sig = st.grad(st.div(sig, M1, M2)+f, Dx, Dy, n)
Norms = np.linalg.norm(sig, axis=1)
Norms = np.repeat(Norms[:, np.newaxis], 2, axis=1)
sig[Norms > 0] = np.divide(sig[Norms > 0], Norms[Norms > 0]) # normalized
sig = a*sig
# step size
D_u = D_func_single_patch(sig, sig_old, a, a_old, u, Dx, Dy, M1, M2, f, lam, n)
theta = step_size_single_patch(sig, sig_old, a, a_old, D_u, lam)
Du_list.append(D_u)
# full update on A and Sig
a = a_old+theta*(a-a_old)
sig = sig_old+theta*(sig-sig_old)
if show_details:
if it % 10000 == 5:
print(f'\n ########## Iteration: {it}')
print(f"best parameter = {a}")
print(f'step size = {theta}')
print(f'D(u^k) = {D_u}')
print(f'|a(k+1)-a(k)| = {np.abs(a-a_old)}')
return a, sig, Du_list
| 3,956 | 32.252101 | 89 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/data.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
Data processing.
For details and references, see Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import glob
import random
import numpy as np
from PIL import Image
def read_image(img_name, px=10, py=10):
img = Image.open(img_name).convert('L')
image_arr = 255-np.array(img.resize((px, py)), dtype=np.float64)
image_arr /= 255
return image_arr
def apply_noise(image, noise_level):
px, py = np.shape(image)
gauss = np.random.normal(0, noise_level, (px, py))
return image+gauss
def create_sample(noise_level, folder, size_patches=16):
dataset = []
num_files = 0
for filename in glob.glob(f'Figures/Cartoon/{folder}/*'):
img = Image.open(filename).convert('L')
sxx, syy = np.shape(img)
sx = int(((sxx/2.3)//size_patches)*size_patches)
sy = int(((syy/2.3)//size_patches)*size_patches)
image_arr = 255-np.array(img.resize((sx, sy)), dtype=np.float64)
image_arr /= 255
for i in np.arange(0, sy, size_patches):
for j in np.arange(0, sx, size_patches):
patch = image_arr[i:i+size_patches, j:j+size_patches]
gauss = np.random.normal(0, noise_level, (size_patches, size_patches))
dataset.append([patch.reshape(int(size_patches**2)),
(patch+gauss).reshape(int(size_patches**2))])
num_files += 1
print(f"Dataset has size: {len(dataset)}\nNumber of pictures: {num_files}")
return random.sample(dataset, len(dataset)) # shuffle for better visualization
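# Usage sketch (illustrative only; the Figures/ folder layout comes from the
# Zenodo archive downloaded in main.py):
#
#   train_set = create_sample(noise_level=0.05, folder="train_set")
#   true_patch, noisy_patch = train_set[0]   # flat vectors of length 16**2
#   patch_img = true_patch.reshape(16, 16)   # back to a 2-D patch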
| 3,155 | 33.681319 | 86 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/train_constant_model.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This file contains the training function for the constant model.
For details and references, see Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import numpy as np
import structures as st
def D_func_cm(Sig, Sig_old, a, a_old, U, Dx, Dy, M1, M2, Fs, lam, n, N):
piece_1 = 0
piece_2 = 0
piece_1 = 1/N*np.sum(np.multiply(st.div_m(Sig_old, M1, M2)+Fs,
st.div_m(Sig_old, M1, M2)-st.div_m(Sig, M1, M2)))
piece_2 = np.mean(st.TV(U, Dx, Dy, n, N))*(a_old-a)
return piece_1+piece_2-lam/2*(a_old-a)**2
def step_size_cm(Sig, Sig_old, a, a_old, Du, lam, N):
'''
The Lipschitz constant can be estimated by 8/N.
'''
dist = np.sum(np.square(Sig-Sig_old))+(a-a_old)**2
return min(1, N*(Du + lam/2*(a-a_old)**2)/(8*dist))
def train_cm(dataset, lam, maxit):
print("\n *** Starting training constant model. Info displayed every 1500 iterations. *** ")
# structure
n = len(dataset[0][0])
size_patches = int(np.sqrt(n))
N = len(dataset) # number of patches
Dx = st.grad_x(size_patches)
Dy = st.grad_y(size_patches)
M1 = -Dx.T
M2 = -Dy.T
# true and noisy images as numpy array for speed
U = np.zeros((n, N))
Fs = np.zeros((n, N))
for k in range(N):
U[:, k] = dataset[k][0]
Fs[:, k] = dataset[k][1]
# initializing
Sig = np.zeros((n, N, 2))
a = 0
Du_list = []
D_u = 1
it = 0
while D_u > 1e-5 and it <= maxit:
a_old = a
Sig_old = np.copy(Sig)
# partial update on A
pars = st.TV(U, Dx, Dy, n, N)-st.TV(st.div_m(Sig, M1, M2)+Fs, Dx, Dy, n, N)
a = np.maximum(a-np.mean(pars)/lam, 0)
# partial update on Sig
Sig = st.grad_m(st.div_m(Sig, M1, M2)+Fs, Dx, Dy, n, N)
Norms = np.linalg.norm(Sig, axis=2)
Norms = np.repeat(Norms[:, :, np.newaxis], 2, axis=2)
Sig[Norms > 0] = np.divide(Sig[Norms > 0], Norms[Norms > 0]) # normalized
Sig = a*Sig
# step size
D_u = D_func_cm(Sig, Sig_old, a, a_old, U, Dx, Dy, M1, M2, Fs, lam, n, N)
theta = step_size_cm(Sig, Sig_old, a, a_old, D_u, lam, N)
Du_list.append(D_u)
# full update on A and Sig
a = a_old+theta*(a-a_old)
Sig = Sig_old+theta*(Sig-Sig_old)
it += 1
if it % 1500 == 5 and it >= 6:
print(f'\n ########## Iteration: {it}')
print(f'Parameter alpha = {a}')
print(f'step size = {theta}')
print(f'D(u^k) = {D_u}')
print(f'|a(k+1)-a(k)| = {np.abs(a-a_old)}')
print(f'\n ########## Final iteration: {it}')
print(f'Final parameter alpha = {a}')
print(f'Final residual D(u^k) = {D_u}')
return a, Sig, Du_list
| 4,346 | 32.438462 | 96 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/plots.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This file contains useful functions to plot our numerical results.
For details and references, see Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
import structures as st
def compare_results(Sig, dataset, num_plots):
n, N, _ = np.shape(Sig)
p = int(np.sqrt(n))
Dx = st.grad_x(p)
M1 = -Dx.T
Dy = st.grad_y(p)
M2 = -Dy.T
# plot num_plots random patches
indexes = random.sample(list(range(N)), num_plots)
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 17})
rc('text', usetex=True)
# plot
_fig, axs = plt.subplots(num_plots, 3, figsize=(6, 10))
axs = np.atleast_2d(axs)
for i in range(num_plots):
axs[i, 0].imshow(dataset[indexes[i]][0].reshape((p,p)), cmap="gray", vmin=0, vmax=1.0)
if i == 0:
axs[i, 0].set_title('Ground-truth')
axs[i, 0].text(-4,15, f'patch {indexes[i]}', rotation='vertical')
axs[i, 0].axis('off')
axs[i, 1].imshow(dataset[indexes[i]][1].reshape((p,p)), cmap="gray", vmin=0, vmax=1.0)
if i == 0:
axs[i, 1].set_title('Noisy')
axs[i, 1].axis('off')
debiased = st.div(Sig[:, indexes[i], :], M1, M2)+dataset[indexes[i]][1]
axs[i, 2].imshow(debiased.reshape((p, p)), cmap="gray", vmin=0, vmax=1.0)
if i == 0:
axs[i, 2].set_title('Denoised')
axs[i, 2].axis('off')
plt.savefig("patches.pdf", bbox_inches='tight')
plt.show()
def plot_D(Du_list, Save=True):
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 20})
rc('text', usetex=True)
plt.figure(figsize=(10, 5))
plt.semilogy(list(range(len(Du_list))), Du_list, linewidth=0.25, color='k')
plt.grid(True, which="both")
plt.xlim([0, len(Du_list)])
plt.xlabel(r"Iterations $(k)$")
plt.ylabel(r"$D(A^k,v^k)$")
if Save:
plt.savefig("du.pdf", bbox_inches='tight')
plt.show()
def plot_exp1(objects):
rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 17})
rc('text', usetex=True)
# plot
_fig, axs = plt.subplots(6, 4, figsize=(11, 16))
axs = np.atleast_2d(axs)
for (i, test) in enumerate(objects):
true_image = test[0]
noisy_image = test[1]
denoised_image = test[2]
params = test[3]
axs[i, 0].imshow(true_image, cmap="gray_r", vmin=0, vmax=1.0)
axs[i, 0].set_xticks([])
axs[i, 0].set_yticks([])
if i == 0:
axs[i, 0].set_title('Ground-truth')
axs[i, 1].imshow(noisy_image, cmap="gray_r", vmin=0, vmax=1.0)
axs[i, 1].set_xticks([])
axs[i, 1].set_yticks([])
if i == 0:
axs[i, 1].set_title('Noisy')
axs[i, 2].imshow(denoised_image, cmap="gray_r", vmin=0, vmax=1.0)
axs[i, 2].set_xticks([])
axs[i, 2].set_yticks([])
if i == 0:
axs[i, 2].set_title('Denoised')
pars_img = axs[i, 3].imshow(params, cmap="hot")
axs[i, 3].set_xticks([])
axs[i, 3].set_yticks([])
im_ratio = params.shape[0]/params.shape[1]
plt.colorbar(pars_img, ax=axs[i, 3], fraction=0.046*im_ratio, pad=0.04)
if i == 0:
axs[i, 3].set_title("Predicted parameters")
plt.savefig("experiment1_comparisons.pdf", bbox_inches='tight')
plt.savefig("experiment1_comparisons.png", bbox_inches='tight')
plt.show()
| 5,081 | 32.88 | 94 |
py
|
TV-parameter-learning
|
TV-parameter-learning-master/denoising.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Kristian Bredies ([email protected])
# Enis Chenchene ([email protected])
# Alireza Hosseini ([email protected])
#
# This file is part of the example code repository for the paper:
#
# K. Bredies, E. Chenchene, A. Hosseini.
# A hybrid proximal generalized conditional gradient method and application
# to total variation parameter learning, 2022.
# Submitted to ECC23, within the EUCA Series of European Control Conferences,
# To be held in Bucharest, Romania, from June 13 to June 16, 2023.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
This file contains the functions for TV denoising of a single patch "denoise" and
of a figure divided into several patches "denoise_large".
For details and references, see Section III.D in:
K. Bredies, E. Chenchene, A. Hosseini.
A hybrid proximal generalized conditional gradient method and
application to total variation parameter learning, 2022.
Submitted to ECC23, within the EUCA Series of European Control Conferences
To be held in Bucharest, Romania, from June 13 to June 16, 2023.
"""
import numpy as np
import structures as st
def denoise(f, par, maxit=100000, show_details=False):
'''
Implements primal-dual for TV-denoising with parameter "par", "f" being the noisy image.
'''
# structure
n = len(f)
p = int(np.sqrt(n))
Dx = st.grad_x(p)
Dy = st.grad_y(p)
M1 = -Dx.T
M2 = -Dy.T
# initialize
psi = np.zeros(n)
sig = np.zeros((n, 2))
# parameters
tau = 1/np.sqrt(8)
mu = 1/np.sqrt(8)
res = 1
it = 1
while res > 1e-7 and it < maxit:
psi_old = np.copy(psi)
psi = st.prox_fidelity(tau/par, psi+tau*st.div(sig, M1, M2), f)
sig_old = np.copy(sig)
sig = st.proj_12(sig+mu*st.grad(2*psi-psi_old, Dx, Dy, n))
# residual
res_p = np.linalg.norm( (psi_old-psi)/tau + st.div(sig_old-sig, M1, M2))**2
res_d = np.linalg.norm( (sig_old-sig)/mu - st.grad(psi_old-psi, Dx, Dy, n))**2
res = res_p+res_d
it += 1
if show_details:
if it % 10000 == 5 and it > 10000:
print(f"##### iteration: {it}")
print(f"primal residual: {res_p}")
print(f"dual residual: {res_d}")
print(f"residual: {res_p+res_d}")
if it == maxit:
warning = 1
else:
warning = 0
return psi, warning
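# A minimal usage sketch (not in the original): denoise one synthetic 16x16
# patch with a hand-picked parameter. `par` is arbitrary here; in the paper it
# is predicted by the learned quadratic model (see denoise_large below).
def _demo_denoise(size=16, noise_level=0.05, par=0.05, seed=0):
    rng = np.random.default_rng(seed)
    f = rng.random(size**2) + rng.normal(0, noise_level, size**2)
    psi, warning = denoise(f, par, maxit=5000)
    return psi.reshape(size, size), warning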
def denoise_large(true_image, noisy_image, A, size_patches):
'''
Splits "noisy_image" into several patches and denoises every patch individually.
'''
px, py = np.shape(noisy_image)
tot_patches = int(px*py/size_patches**2)
print(f"Splitting into {tot_patches} patches")
denoised_image = np.zeros((px, py))
params = np.zeros((int(px/size_patches), int(py/size_patches)))
done = 0
for i in np.arange(0, px, size_patches):
for j in np.arange(0, py, size_patches):
# extract patch
noisy_patch = noisy_image[i:i+size_patches, j:j+size_patches]
noisy_patch_vect = noisy_patch.reshape(size_patches**2)
# compute best parameter
noisy_patch_vect_b = np.append(noisy_patch_vect, 1) # add bias
best_par = noisy_patch_vect_b.T@A@noisy_patch_vect_b
# debias with learned parameter
denoised_patch, warning = denoise(noisy_patch_vect, best_par)
if warning == 1:
print(f"Warning: denoising in patch {(i,j)} failed.")
denoised_image[i:i+size_patches, j:j+size_patches] = \
denoised_patch.reshape((size_patches,size_patches))
params[int(i/size_patches), int(j/size_patches)] = best_par
done += 1
if done % 50 == 0:
print(f"Done {done} patches. Still {tot_patches-done} remain...")
return denoised_image, params
| 4,600 | 33.081481 | 92 |
py
|
daanet
|
daanet-master/app.py
|
import sys
from gpu_env import DEVICE_ID, MODEL_ID, CONFIG_SET
from utils.helper import set_logger, parse_args, get_args_cli
def run():
set_logger(model_id='%s:%s' % (DEVICE_ID, MODEL_ID))
followup_args = get_args_cli(sys.argv[3:]) if len(sys.argv) > 3 else None
args = parse_args(sys.argv[2] if len(sys.argv) > 2 else None, MODEL_ID, CONFIG_SET, followup_args)
getattr(__import__('api'), sys.argv[1])(args)
if __name__ == '__main__':
run()
| 466 | 28.1875 | 102 |
py
|
daanet
|
daanet-master/api.py
|
import logging
from tensorflow.python.framework.errors_impl import NotFoundError, InvalidArgumentError
from gpu_env import ModeKeys, APP_NAME
from utils.helper import build_model
logger = logging.getLogger(APP_NAME)
def train(args):
# check run_mode
if 'run_mode' in args:
args.set_hparam('run_mode', ModeKeys.TRAIN.value)
model = build_model(args)
try:
model.restore(use_ema=False, use_partial_loader=False)
        model.reset()  # for continuous training, reset some layers to random if necessary
except (NotFoundError, InvalidArgumentError) as e:
logger.debug(e)
logger.info('no available model, will train from scratch!')
model.train()
def evaluate(args):
model = build_model(args)
model.restore()
return model.evaluate()
def demo(args):
args.is_serving = True # set it to true to ignore data set loading
model = build_model(args)
model.restore()
sample_context = ''
sample_questions = ['What was Maria Curie the first female recipient of?',
'What year was Casimir Pulaski born in Warsaw?',
'Who was one of the most famous people born in Warsaw?',
'Who was Frédéric Chopin?',
'How old was Chopin when he moved to Warsaw with his family?']
sample_answers = ['Nobel Prize',
'1745',
'Maria Skłodowska-Curie',
'Famous musicians',
'seven months old']
for q, g in zip(sample_questions, sample_answers):
a = model.predict(sample_context, q) # real work is here!
logger.info('QUESTION: %s' % q)
logger.info('ANSWER: %s <- GOLD: %s' % (a, g))
| 1,758 | 32.826923 | 92 |
py
|
daanet
|
daanet-master/grid_search.py
|
import itertools
import os
import sys
from ruamel.yaml import YAML
from utils.helper import set_logger, fill_gpu_jobs, get_tmp_yaml
def run():
logger = set_logger()
with open('grid.yaml') as fp:
settings = YAML().load(fp)
test_set = sys.argv[1:] if len(sys.argv) > 1 else settings['common']['config']
all_args = [settings[t] for t in test_set]
entrypoint = settings['common']['entrypoint']
with open('default.yaml') as fp:
settings_default = YAML().load(fp)
os.environ['suffix_model_id'] = settings_default['default']['suffix_model_id']
cmd = ' '.join(['python app.py', entrypoint, '%s'])
all_jobs = []
for all_arg in all_args:
k, v = zip(*[(k, v) for k, v in all_arg.items()])
all_jobs += [{kk: pp for kk, pp in zip(k, p)} for p in itertools.product(*v)]
while all_jobs:
all_jobs = fill_gpu_jobs(all_jobs, logger,
job_parser=lambda x: cmd % get_tmp_yaml(x,
(os.environ['suffix_model_id'] if
os.environ['suffix_model_id'] else
'+'.join(test_set)) + '-'),
wait_until_next=settings['common']['wait_until_next'],
retry_delay=settings['common']['retry_delay'],
do_shuffle=True)
logger.info('all jobs are done!')
if __name__ == '__main__':
run()
| 1,612 | 36.511628 | 108 |
py
|
daanet
|
daanet-master/gpu_env.py
|
import os
from datetime import datetime
from enum import Enum
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
IGNORE_PATTERNS = ('data', '*.pyc', 'CVS', '.git', 'tmp', '.svn', '__pycache__', '.gitignore', '.*.yaml')
MODEL_ID = datetime.now().strftime("%m%d-%H%M%S") + (
os.environ['suffix_model_id'] if 'suffix_model_id' in os.environ else '')
APP_NAME = 'mrc'
class SummaryType(Enum):
SCALAR = 1
HISTOGRAM = 2
SAMPLED = 3
class ModeKeys(Enum):
TRAIN = 1
EVAL = 2
INFER = 3
INTERACT = 4
DECODE = 5
TEST = 6
try:
import GPUtil
# GPUtil.showUtilization()
DEVICE_ID_LIST = GPUtil.getFirstAvailable(order='random', maxMemory=0.1, maxLoad=0.1)
DEVICE_ID = DEVICE_ID_LIST[0] # grab first element from list
os.environ["CUDA_VISIBLE_DEVICES"] = str(DEVICE_ID)
CONFIG_SET = 'default_gpu'
except FileNotFoundError:
print('no gpu found!')
DEVICE_ID = 'x'
CONFIG_SET = 'default'
except RuntimeError:
print('all gpus are occupied!')
DEVICE_ID = '?'
CONFIG_SET = 'default_gpu'
print('use config: %s' % CONFIG_SET)
| 1,139 | 23.255319 | 105 |
py
|
daanet
|
daanet-master/nlp/match_blocks.py
|
import tensorflow as tf
from nlp.nn import linear_logit, layer_norm
from nlp.seq2seq.common import dropout, softmax_mask
def Attention_match(context, query, context_mask, query_mask,
num_units=None,
scope='attention_match_block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
if num_units is None:
num_units = context.get_shape().as_list()[-1]
score = tf.matmul(context, query, transpose_b=True)
else:
score = tf.matmul(linear_logit(context, num_units, scope='context2hidden'),
linear_logit(query, num_units, scope='query2hidden'),
transpose_b=True)
mask = tf.matmul(tf.expand_dims(context_mask, -1), tf.expand_dims(query_mask, -1), transpose_b=True)
paddings = tf.ones_like(mask) * (-2 ** 32 + 1)
masked_score = tf.where(tf.equal(mask, 0), paddings, score / (num_units ** 0.5)) # B, Lc, Lq
query2context_score = tf.reduce_sum(masked_score, axis=2, keepdims=True) # B, Lc, 1
match_score = tf.nn.softmax(query2context_score, axis=1) # (B, Lc, 1)
return context * match_score
def Transformer_match(context,
query,
context_mask,
query_mask,
num_units=None,
num_heads=8,
dropout_keep_rate=1.0,
causality=True,
scope='MultiHead_Attention_Block',
reuse=None,
residual=False,
normalize_output=True,
**kwargs):
"""Applies multihead attention.
Args:
context: A 3d tensor with shape of [N, T_q, C_q].
query: A 3d tensor with shape of [N, T_k, C_k].
num_units: A scalar. Attention size.
dropout_rate: A floating point number.
is_training: Boolean. Controller of mechanism for dropout.
causality: Boolean. If true, units that reference the future are masked.
num_heads: An int. Number of heads.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns
A 3d tensor with shape of (N, T_q, C)
"""
if num_units is None or residual:
num_units = context.get_shape().as_list()[-1]
with tf.variable_scope(scope, reuse=reuse):
# Linear projections
Q = tf.layers.dense(context, num_units, activation=tf.nn.relu) # (N, T_q, C)
K = tf.layers.dense(query, num_units, activation=tf.nn.relu) # (N, T_k, C)
V = tf.layers.dense(query, num_units, activation=tf.nn.relu) # (N, T_k, C)
# Split and concat
Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
# Multiplication
outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)
# Scale
outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
        # Key masking (the keys come from `query` here)
mask1 = tf.tile(query_mask, [num_heads, 1]) # (h*N, T_k)
mask1 = tf.tile(tf.expand_dims(mask1, 1), [1, tf.shape(context)[1], 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(outputs) * (-2 ** 32 + 1)
outputs = tf.where(tf.equal(mask1, 0), paddings, outputs) # (h*N, T_q, T_k)
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)
tril = tf.contrib.linalg.LinearOperatorLowerTriangular(diag_vals).to_dense() # (T_q, T_k)
masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(masks) * (-2 ** 32 + 1)
outputs = tf.where(tf.equal(masks, 0), paddings, outputs) # (h*N, T_q, T_k)
# Activation
outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)
        # Query masking (the queries come from `context` here)
mask2 = tf.tile(context_mask, [num_heads, 1]) # (h*N, T_q)
mask2 = tf.tile(tf.expand_dims(mask2, -1), [1, 1, tf.shape(query)[1]]) # (h*N, T_q, T_k)
outputs *= mask2 # (h*N, T_q, T_k)
# Dropouts
outputs = tf.nn.dropout(outputs, keep_prob=dropout_keep_rate)
# Weighted sum
outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h)
# Restore shape
outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2) # (N, T_q, C)
if residual:
# Residual connection
outputs += context
if normalize_output:
# Normalize
outputs = layer_norm(outputs) # (N, T_q, C)
return outputs
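# Usage sketch (illustrative, not part of the original file): matching a
# context of length 20 against a query of length 15 with 4 heads.
#
#   context = tf.random_normal([8, 20, 64])               # B, T_q, D
#   query = tf.random_normal([8, 15, 64])                 # B, T_k, D
#   c_mask = tf.ones([8, 20]); q_mask = tf.ones([8, 15])
#   out = Transformer_match(context, query, c_mask, q_mask,
#                           num_heads=4, causality=False)  # shape (8, 20, 64)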
def BiDaf_match(a, b, a_mask, b_mask, residual, scope=None, reuse=None, **kwargs):
# context: [batch, l, d]
# question: [batch, l2, d]
with tf.variable_scope(scope, reuse=reuse):
n_a = tf.tile(tf.expand_dims(a, 2), [1, 1, tf.shape(b)[1], 1])
n_b = tf.tile(tf.expand_dims(b, 1), [1, tf.shape(a)[1], 1, 1])
n_ab = n_a * n_b
n_abab = tf.concat([n_ab, n_a, n_b], -1)
kernel = tf.squeeze(tf.layers.dense(n_abab, units=1), -1)
a_mask = tf.expand_dims(a_mask, -1)
b_mask = tf.expand_dims(b_mask, -1)
kernel_mask = tf.matmul(a_mask, b_mask, transpose_b=True)
kernel += (kernel_mask - 1) * 1e5
con_query = tf.matmul(tf.nn.softmax(kernel, 1), b)
con_query = con_query * a_mask
query_con = tf.matmul(tf.transpose(
tf.reduce_max(kernel, 2, keepdims=True), [0, 2, 1]), a * a_mask)
query_con = tf.tile(query_con, [1, tf.shape(a)[1], 1])
        # NOTE: the residual and non-residual paths produce the same
        # concatenation here, so the `residual` flag has no effect on the output.
        return tf.concat([a * query_con, a * con_query, a, query_con], 2)
def dot_attention(inputs, memory, mask, hidden_size, keep_prob=1.0, is_train=None, scope=None):
with tf.variable_scope(scope or 'dot_attention'):
d_inputs = dropout(inputs, keep_prob=keep_prob, is_train=is_train)
d_memory = dropout(memory, keep_prob=keep_prob, is_train=is_train)
JX = tf.shape(inputs)[1]
with tf.variable_scope("attention"):
inputs_ = tf.nn.relu(
tf.layers.dense(d_inputs, hidden_size, use_bias=False, name="inputs"))
memory_ = tf.nn.relu(
tf.layers.dense(d_memory, hidden_size, use_bias=False, name="memory"))
outputs = tf.matmul(inputs_, tf.transpose(
memory_, [0, 2, 1])) / (hidden_size ** 0.5)
mask = tf.tile(tf.expand_dims(mask, axis=1), [1, JX, 1])
logits = tf.nn.softmax(softmax_mask(outputs, mask))
outputs = tf.matmul(logits, memory)
res = tf.concat([inputs, outputs], axis=2)
with tf.variable_scope("gate"):
dim = res.get_shape().as_list()[-1]
d_res = dropout(res, keep_prob=keep_prob, is_train=is_train)
gate = tf.nn.sigmoid(tf.layers.dense(d_res, dim, use_bias=False))
return res * gate
| 7,338 | 41.421965 | 108 |
py
|
daanet
|
daanet-master/nlp/nn.py
|
import tensorflow as tf
from tensorflow.python.ops import array_ops
initializer = tf.contrib.layers.variance_scaling_initializer(factor=1.0,
mode='FAN_AVG',
uniform=True,
dtype=tf.float32)
initializer_relu = tf.contrib.layers.variance_scaling_initializer(factor=2.0,
mode='FAN_IN',
uniform=False,
dtype=tf.float32)
regularizer = tf.contrib.layers.l2_regularizer(scale=3e-7)
def minus_mask(x, mask, offset=1e30):
"""
masking by subtract a very large number
:param x: sequence data in the shape of [B, L, D]
:param mask: 0-1 mask in the shape of [B, L]
:param offset: very large negative number
:return: masked x
"""
return x - tf.expand_dims(1.0 - mask, axis=-1) * offset
def mul_mask(x, mask):
"""
masking by multiply zero
:param x: sequence data in the shape of [B, L, D]
:param mask: 0-1 mask in the shape of [B, L]
:return: masked x
"""
return x * tf.expand_dims(mask, axis=-1)
def masked_reduce_mean(x, mask):
return tf.reduce_sum(mul_mask(x, mask), axis=1) / tf.reduce_sum(mask, axis=1, keepdims=True)
def masked_reduce_max(x, mask):
return tf.reduce_max(minus_mask(x, mask), axis=1)
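# A tiny illustration (not in the original) of the masking helpers above,
# in the TF1 style used throughout this file:
#
#   x = tf.constant([[[1.], [2.], [100.]]])   # B=1, L=3, D=1
#   mask = tf.constant([[1., 1., 0.]])        # last position is padding
#   with tf.Session() as sess:
#       print(sess.run(masked_reduce_mean(x, mask)))  # [[1.5]]
#       print(sess.run(masked_reduce_max(x, mask)))   # [[2.]]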
def weighted_sparse_softmax_cross_entropy(labels, preds, weights):
"""
computing sparse softmax cross entropy by weighting differently on classes
:param labels: sparse label in the shape of [B], size of label is L
:param preds: logit in the shape of [B, L]
:param weights: weight in the shape of [L]
:return: weighted sparse softmax cross entropy in the shape of [B]
"""
return tf.losses.sparse_softmax_cross_entropy(labels,
logits=preds,
weights=get_bounded_class_weight(labels, weights))
def get_bounded_class_weight(labels, weights, ub=None):
if weights is None:
return 1.0
else:
w = tf.gather(weights, labels)
w = w / tf.reduce_min(w)
w = tf.clip_by_value(1.0 + tf.log1p(w),
clip_value_min=1.0,
clip_value_max=ub if ub is not None else tf.cast(tf.shape(weights)[0], tf.float32) / 2.0)
return w
def weighted_smooth_softmax_cross_entropy(labels, num_labels, preds, weights,
epsilon=0.1):
"""
computing smoothed softmax cross entropy by weighting differently on classes
:param epsilon: smoothing factor
:param num_labels: maximum number of labels
:param labels: sparse label in the shape of [B], size of label is L
:param preds: logit in the shape of [B, L]
:param weights: weight in the shape of [L]
:return: weighted sparse softmax cross entropy in the shape of [B]
"""
return tf.losses.softmax_cross_entropy(tf.one_hot(labels, num_labels),
logits=preds,
label_smoothing=epsilon,
weights=get_bounded_class_weight(labels, weights))
def get_var(name, shape, dtype=tf.float32,
initializer_fn=initializer,
regularizer_fn=regularizer, **kwargs):
return tf.get_variable(name, shape,
initializer=initializer_fn,
dtype=dtype,
regularizer=regularizer_fn, **kwargs)
def layer_norm(inputs,
epsilon=1e-8,
scope=None,
reuse=None):
"""Applies layer normalization.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`.
epsilon: A floating number. A very small number for preventing ZeroDivision Error.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A tensor with the same shape and data dtype as `inputs`.
"""
with tf.variable_scope(scope or 'Layer_Normalize', reuse=reuse):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
beta = tf.get_variable("beta", shape=params_shape, initializer=tf.constant_initializer(0.0))
gamma = tf.get_variable("gama", shape=params_shape, initializer=tf.constant_initializer(1.0))
normalized = (inputs - mean) / ((variance + epsilon) ** .5)
outputs = gamma * normalized + beta
return outputs
def linear_logit(x, units, act_fn=None, dropout_keep=1., use_layer_norm=False, scope=None, reuse=None, **kwargs):
with tf.variable_scope(scope or 'linear_logit', reuse=reuse):
logit = tf.layers.dense(x, units=units, activation=act_fn,
kernel_initializer=initializer,
kernel_regularizer=regularizer)
# do dropout
logit = tf.nn.dropout(logit, keep_prob=dropout_keep)
if use_layer_norm:
logit = tf.contrib.layers.layer_norm(logit)
return logit
def bilinear_logit(x, units, act_fn=None,
first_units=256,
first_act_fn=tf.nn.relu, scope=None, **kwargs):
with tf.variable_scope(scope or 'bilinear_logit'):
first = linear_logit(x, first_units, act_fn=first_act_fn, scope='first', **kwargs)
return linear_logit(first, units, scope='second', act_fn=act_fn, **kwargs)
def label_smoothing(inputs, epsilon=0.1):
"""Applies label smoothing. See https://arxiv.org/abs/1512.00567.
Args:
inputs: A 3d tensor with shape of [N, T, V], where V is the number of vocabulary.
epsilon: Smoothing rate.
For example,
```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[0, 1, 0]]], tf.float32)
outputs = label_smoothing(inputs)
with tf.Session() as sess:
print(sess.run([outputs]))
>>
[array([[[ 0.03333334, 0.03333334, 0.93333334],
[ 0.03333334, 0.93333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334]],
[[ 0.93333334, 0.03333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334],
[ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]
```
"""
K = inputs.get_shape().as_list()[-1] # number of channels
return ((1 - epsilon) * inputs) + (epsilon / K)
def normalize_by_axis(x, axis, smooth_factor=1e-5):
x += smooth_factor
return x / tf.reduce_sum(x, axis, keepdims=True) # num A x num B
def get_cross_correlated_mat(num_out_A, num_out_B, learn_cooc='FIXED', cooc_AB=None, scope=None, reuse=None):
with tf.variable_scope(scope or 'CrossCorrlated_Mat', reuse=reuse):
if learn_cooc == 'FIXED' and cooc_AB is not None:
pB_given_A = normalize_by_axis(cooc_AB, 1)
pA_given_B = normalize_by_axis(cooc_AB, 0)
elif learn_cooc == 'JOINT':
share_cooc = tf.nn.relu(get_var('cooc_ab', shape=[num_out_A, num_out_B]))
pB_given_A = normalize_by_axis(share_cooc, 1)
pA_given_B = normalize_by_axis(share_cooc, 0)
elif learn_cooc == 'DISJOINT':
cooc1 = tf.nn.relu(get_var('pb_given_a', shape=[num_out_A, num_out_B]))
cooc2 = tf.nn.relu(get_var('pa_given_b', shape=[num_out_A, num_out_B]))
pB_given_A = normalize_by_axis(cooc1, 1)
pA_given_B = normalize_by_axis(cooc2, 0)
else:
raise NotImplementedError
return pA_given_B, pB_given_A
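# A small worked example (not in the original): with a fixed co-occurrence
# matrix cooc_AB of shape [num_A, num_B], normalizing over axis 1 gives rows
# that sum to 1, i.e. p(B|A); normalizing over axis 0 gives columns that sum
# to 1, i.e. p(A|B) (both up to the smoothing factor).
#
#   cooc = tf.constant([[2., 2.], [0., 4.]])
#   pA_given_B, pB_given_A = get_cross_correlated_mat(
#       2, 2, learn_cooc='FIXED', cooc_AB=cooc)
#   # rows of pB_given_A are approximately [[0.5, 0.5], [0., 1.]]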
def get_self_correlated_mat(num_out_A, scope=None, reuse=None):
with tf.variable_scope(scope or 'Self_Correlated_mat', reuse=reuse):
cooc1 = get_var('pa_corr', shape=[num_out_A, num_out_A],
initializer_fn=tf.contrib.layers.variance_scaling_initializer(factor=0.1,
mode='FAN_AVG',
uniform=True,
dtype=tf.float32),
regularizer_fn=tf.contrib.layers.l2_regularizer(scale=3e-4))
return tf.matmul(cooc1, cooc1, transpose_b=True) + tf.eye(num_out_A)
def gate_filter(x, scope=None, reuse=None):
with tf.variable_scope(scope or 'Gate', reuse=reuse):
threshold = get_var('threshold', shape=[])
gate = tf.cast(tf.greater(x, threshold), tf.float32)
return x * gate
def focal_loss2(onehot_labels, prediction_tensor, alpha=0.25, gamma=2, ):
y_ = tf.cast(onehot_labels, dtype=tf.float32)
sigmoid_p = tf.nn.sigmoid(prediction_tensor)
zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
    # For positive predictions, only the first loss term matters (the second is 0);
    # target_tensor > zeros <=> z=1, so the positive coefficient is z - p.
    pos_p_sub = array_ops.where(y_ > zeros, y_ - sigmoid_p, zeros)
    # For negative predictions, only the second loss term matters (the first is 0);
    # where target_tensor > zeros (z=1) the negative coefficient is 0, otherwise it is p.
neg_p_sub = array_ops.where(y_ > zeros, zeros, sigmoid_p)
# per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
# - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
per_entry_cross_ent = - (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) - (
neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
dn, dp = tf.dynamic_partition(per_entry_cross_ent, tf.cast(y_, tf.int32), 2)
return tf.reduce_sum(dp), tf.reduce_sum(dn), tf.reduce_sum(per_entry_cross_ent)
def focal_loss(prediction_tensor, target_tensor, weights=None, alpha=0.25, gamma=2):
r"""Compute focal loss for predictions.
Multi-labels Focal loss formula:
FL = -alpha * (z-p)^gamma * log(p) -(1-alpha) * p^gamma * log(1-p)
,which alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets
      weights: A float tensor of shape [batch_size, num_anchors] (unused in this implementation)
alpha: A scalar tensor for focal loss alpha hyper-parameter
gamma: A scalar tensor for focal loss gamma hyper-parameter
Returns:
loss: A (scalar) tensor representing the value of the loss function
"""
sigmoid_p = tf.nn.sigmoid(prediction_tensor)
zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
    # For positive predictions, only the first loss term matters (the second is 0);
    # target_tensor > zeros <=> z=1, so the positive coefficient is z - p.
    pos_p_sub = array_ops.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)
    # For negative predictions, only the second loss term matters (the first is 0);
    # where target_tensor > zeros (z=1) the negative coefficient is 0, otherwise it is p.
neg_p_sub = array_ops.where(target_tensor > zeros, zeros, sigmoid_p)
per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
- (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
return tf.reduce_sum(per_entry_cross_ent)
def spatial_dropout(x, scope=None, reuse=None):
input_dim = x.get_shape().as_list()[-1]
with tf.variable_scope(scope or 'spatial_dropout', reuse=reuse):
d = tf.random_uniform(shape=[1], minval=0, maxval=input_dim, dtype=tf.int32)
f = tf.one_hot(d, on_value=0., off_value=1., depth=input_dim)
g = x * f # do dropout
g *= (1. + 1. / input_dim) # do rescale
return g
def get_last_output(output, seq_length, scope=None, reuse=None):
"""Get the last value of the returned output of an RNN.
http://disq.us/p/1gjkgdr
output: [batch x number of steps x ... ] Output of the dynamic lstm.
sequence_length: [batch] Length of each of the sequence.
"""
with tf.variable_scope(scope or 'gather_nd', reuse=reuse):
rng = tf.range(0, tf.shape(seq_length)[0])
indexes = tf.stack([rng, seq_length - 1], 1)
return tf.gather_nd(output, indexes)
def get_lstm_init_state(batch_size, num_layers, num_units, direction, scope=None, reuse=None, **kwargs):
with tf.variable_scope(scope or 'lstm_init_state', reuse=reuse):
num_dir = 2 if direction.startswith('bi') else 1
c = get_var('lstm_init_c', shape=[num_layers * num_dir, num_units])
c = tf.tile(tf.expand_dims(c, axis=1), [1, batch_size, 1])
h = get_var('lstm_init_h', shape=[num_layers * num_dir, num_units])
h = tf.tile(tf.expand_dims(h, axis=1), [1, batch_size, 1])
return c, h
def highway_layer(x, scope=None, reuse=None, **kwargs):
with tf.variable_scope(scope or "highway_layer", reuse=reuse):
d = x.get_shape()[-1]
trans = linear_logit(x, d, scope='trans', reuse=reuse)
trans = tf.nn.relu(trans)
gate = linear_logit(x, d, scope='gate', reuse=reuse)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * x
return out
def highway_network(x, num_layers, scope=None, reuse=None, **kwargs):
with tf.variable_scope(scope or "highway_network", reuse=reuse):
prev = x
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, scope="layer_{}".format(layer_idx), reuse=reuse)
prev = cur
return cur
| 14,303 | 42.345455 | 119 |
py
|
daanet
|
daanet-master/nlp/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/nlp/encode_blocks.py
|
import tensorflow as tf
from nlp.nn import initializer, regularizer, spatial_dropout, get_lstm_init_state, layer_norm
def LSTM_encode(seqs, scope='lstm_encode_block', reuse=None, **kwargs):
with tf.variable_scope(scope, reuse=reuse):
batch_size = tf.shape(seqs)[0]
_seqs = tf.transpose(seqs, [1, 0, 2]) # to T, B, D
lstm = tf.contrib.cudnn_rnn.CudnnLSTM(**kwargs)
init_state = get_lstm_init_state(batch_size, **kwargs)
output = lstm(_seqs, init_state)[0] # 2nd return is state, ignore
return tf.transpose(output, [1, 0, 2]) # back to B, T, D
def TCN_encode(seqs, num_layers, normalize_output=True, scope='tcn_encode_block', reuse=None,
layer_norm_scope='layer_norm', **kwargs):
with tf.variable_scope(scope, reuse=reuse):
outputs = [seqs]
for i in range(num_layers):
dilation_size = 2 ** i
out = Res_DualCNN_encode(outputs[-1], dilation=dilation_size, scope='res_biconv_%d' % i, **kwargs)
outputs.append(out)
result = outputs[-1]
if normalize_output:
result = layer_norm(result, scope=layer_norm_scope, reuse=reuse)
return result
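# A small sketch of the receptive field implied by the TCN stack above: each
# residual block applies two causal convolutions at dilation 2**i, so layer i
# adds 2 * (filter_size - 1) * 2**i past positions. The helper below assumes
# the default of two convolutions per Res_DualCNN_encode block.
def _tcn_receptive_field(filter_size=3, num_layers=4, convs_per_block=2):
    # e.g. filter_size=3, num_layers=4 -> 1 + 2 * 2 * 15 = 61 past positions
    return 1 + convs_per_block * (filter_size - 1) * (2 ** num_layers - 1)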
def Res_DualCNN_encode(seqs, use_spatial_dropout=True, scope='res_biconv_block', reuse=None, **kwargs):
input_dim = seqs.get_shape().as_list()[-1]
with tf.variable_scope(scope, reuse=reuse):
out1 = CNN_encode(seqs, scope='first_conv1d', **kwargs)
if use_spatial_dropout:
out1 = spatial_dropout(out1)
out2 = CNN_encode(out1, scope='second_conv1d', **kwargs)
if use_spatial_dropout:
out2 = spatial_dropout(out2)  # same channel dropout as after the first conv
output_dim = out2.get_shape().as_list()[-1]
if input_dim != output_dim:
res_x = tf.layers.conv1d(seqs,
filters=output_dim,
kernel_size=1,
activation=None,
name='res_1x1conv')
else:
res_x = seqs
return tf.nn.relu(out2 + res_x)
def CNN_encode(seqs, filter_size=3, dilation=1,
num_filters=None, direction='forward', act_fn=tf.nn.relu,
scope=None,
reuse=None, **kwargs):
input_dim = seqs.get_shape().as_list()[-1]
num_filters = num_filters if num_filters else input_dim
# add causality: shift the whole seq to the right
padding = (filter_size - 1) * dilation
if direction == 'forward':
pad_seqs = tf.pad(seqs, [[0, 0], [padding, 0], [0, 0]])
padding_scheme = 'VALID'
elif direction == 'backward':
pad_seqs = tf.pad(seqs, [[0, 0], [0, padding], [0, 0]])
padding_scheme = 'VALID'
elif direction == 'none':
pad_seqs = seqs # no padding; use SAME so the output length matches the input
padding_scheme = 'SAME'
else:
raise NotImplementedError
with tf.variable_scope(scope or 'causal_conv_%s_%s' % (filter_size, direction), reuse=reuse):
return tf.layers.conv1d(
pad_seqs,
num_filters,
filter_size,
activation=act_fn,
padding=padding_scheme,
dilation_rate=dilation,
kernel_initializer=initializer,
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=regularizer)
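# A minimal sketch of the causal padding used above: with direction='forward'
# the sequence is left-padded by (filter_size - 1) * dilation, so the VALID
# convolution keeps the original length and position t only sees inputs <= t.
def _demo_causal_padding():
    filter_size, dilation = 3, 2
    padding = (filter_size - 1) * dilation  # 4 frames of left padding
    x = tf.ones([1, 7, 8])
    pad_x = tf.pad(x, [[0, 0], [padding, 0], [0, 0]])
    with tf.Session() as sess:
        # [1, 11, 8]; an effective kernel width of 5 then yields length 7 again
        print(sess.run(tf.shape(pad_x)))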
| 3,388 | 39.831325 | 110 |
py
|
daanet
|
daanet-master/nlp/seq2seq/pointer_generator.py
|
import tensorflow as tf
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import _compute_attention
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
# from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _linear
from tensorflow.python.ops.rnn_cell_impl import _zero_state_tensors
from tensorflow.python.util import nest
class PointerGeneratorGreedyEmbeddingHelper(tf.contrib.seq2seq.GreedyEmbeddingHelper):
def __init__(self, embeddings, start_tokens, end_token, unk_token):
super(PointerGeneratorGreedyEmbeddingHelper, self).__init__(embeddings, start_tokens, end_token)
self.vocab_size = tf.shape(embeddings)[-1]
self.unk_token = unk_token
def sample(self, time, outputs, state, name=None):
"""sample for PointerGeneratorGreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = tf.argmax(outputs, axis=-1, output_type=tf.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = tf.equal(sample_ids, self._end_token)
all_finished = tf.reduce_all(finished)
# since we have OOV words, we need to change these ids back to UNK before embedding
condition = tf.less(sample_ids, self.vocab_size)
sample_ids = tf.where(condition, sample_ids, tf.ones_like(sample_ids) * self.unk_token)
next_inputs = tf.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
class PointerGeneratorDecoder(tf.contrib.seq2seq.BasicDecoder):
"""Pointer Generator sampling decoder."""
def __init__(self, source_extend_tokens, source_oov_words, coverage, cell, helper, initial_state,
output_layer=None, multi_rnn=False):
self.source_oov_words = source_oov_words
self.source_extend_tokens = source_extend_tokens
self.coverage = coverage
self.multi_rnn = multi_rnn
self.history_inputs = None
super(PointerGeneratorDecoder, self).__init__(cell, helper, initial_state, output_layer)
@property
def output_size(self):
# Return the cell output and the id
return tf.contrib.seq2seq.BasicDecoderOutput(
rnn_output=self._rnn_output_size() + self.source_oov_words,
sample_id=self._helper.sample_ids_shape)
@property
def output_dtype(self):
# Assume the dtype of the cell is the output_size structure
# containing the input_state's first component's dtype.
# Return that structure and the sample_ids_dtype from the helper.
dtype = nest.flatten(self._initial_state)[0].dtype
return tf.contrib.seq2seq.BasicDecoderOutput(
nest.map_structure(lambda _: dtype, self._rnn_output_size() + self.source_oov_words),
self._helper.sample_ids_dtype)
def step(self, time, inputs, state, name=None):
"""Perform a decoding step.
Args:
time: scalar `int32` tensor.
inputs: A (structure of) input tensors.
state: A (structure of) state tensors and TensorArrays.
name: Name scope for any created operations.
Returns:
`(outputs, next_state, next_inputs, finished)`.
"""
with ops.name_scope(name, "PGDecoderStep", (time, inputs, state)):
if self.history_inputs is None:
self.history_inputs = tf.expand_dims(inputs, axis=1) # B,1,D
else:
t_his = tf.expand_dims(inputs, axis=1)
self.history_inputs = tf.concat([self.history_inputs, t_his], axis=1)
self._cell.set_history(self.history_inputs)
cell_outputs, cell_state = self._cell(inputs, state)
# attention = cell_state.attention
# att_cell_state = cell_state.cell_state
# alignments = cell_state.alignments
attention = cell_state.attention
att_cell_state = cell_state.cell_state[-1]
alignments = cell_state.alignments
with tf.variable_scope('calculate_pgen'):
p_gen = tf.layers.dense(tf.concat([attention, inputs, att_cell_state.c, att_cell_state.h], 1), 1,
use_bias=True)
# p_gen = _linear([attention, inputs, att_cell_state], 1, True)
p_gen = tf.sigmoid(p_gen)
if self._output_layer is not None:
cell_outputs = tf.layers.dense(cell_outputs, 1024, activation=tf.nn.tanh)
cell_outputs = self._output_layer(cell_outputs)
vocab_dist = tf.nn.softmax(cell_outputs) * p_gen
# z = tf.reduce_sum(alignments,axis=1)
# z = tf.reduce_sum(tf.cast(tf.less_equal(alignments, 0),tf.int32))
alignments = alignments * (1 - p_gen)
# x = tf.reduce_sum(tf.cast(tf.less_equal((1-p_gen), 0),tf.int32))
# y = tf.reduce_sum(tf.cast(tf.less_equal(alignments[3], 0),tf.int32))
# this is only for debug
# alignments2 = tf.Print(alignments2,[tf.shape(inputs),x,y,alignments[2][9:12]],message="zeros in vocab dist and alignments")
# since we have OOV words, we need to expand the vocab distribution
vocab_size = tf.shape(vocab_dist)[-1]
extended_vsize = vocab_size + self.source_oov_words
batch_size = tf.shape(vocab_dist)[0]
extra_zeros = tf.zeros((batch_size, self.source_oov_words))
# batch * extend vocab size
vocab_dists_extended = tf.concat(axis=-1, values=[vocab_dist, extra_zeros])
# vocab_dists_extended = tf.Print(vocab_dists_extended,[tf.shape(vocab_dists_extended),self.source_oov_words],message='vocab_dists_extended size')
batch_nums = tf.range(0, limit=batch_size) # shape (batch_size)
batch_nums = tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)
attn_len = tf.shape(self.source_extend_tokens)[1] # number of states we attend over
batch_nums = tf.tile(batch_nums, [1, attn_len]) # shape (batch_size, attn_len)
indices = tf.stack((batch_nums, self.source_extend_tokens), axis=2) # shape (batch_size, enc_t, 2)
shape = [batch_size, extended_vsize]
attn_dists_projected = tf.scatter_nd(indices, alignments, shape)
final_dists = attn_dists_projected + vocab_dists_extended
# final_dists = tf.Print(final_dists,[tf.reduce_sum(tf.cast(tf.less_equal(final_dists[0],0),tf.int32))],message='final dist')
# note: sample_ids will contains OOV words
sample_ids = self._helper.sample(
time=time, outputs=final_dists, state=cell_state)
(finished, next_inputs, next_state) = self._helper.next_inputs(
time=time,
outputs=cell_outputs,
state=cell_state,
sample_ids=sample_ids)
all_finished = math_ops.reduce_all(finished)
# note: all_finished is a Tensor, so this Python-level identity check is
# always False in graph mode and the reset below never fires there
if all_finished is True:
self.history_inputs = None
outputs = tf.contrib.seq2seq.BasicDecoderOutput(final_dists, sample_ids)
return (outputs, next_state, next_inputs, finished)
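# A standalone sketch (toy numbers, not the class above) of how
# PointerGeneratorDecoder.step merges the generator's vocab distribution with
# the copy (attention) distribution over the extended vocabulary.
def _demo_final_dist_merge():
    vocab_size, num_oov = 4, 2                      # extended vocab size = 6
    p_gen = tf.constant([[0.7]])
    vocab_dist = tf.constant([[0.1, 0.2, 0.3, 0.4]]) * p_gen
    alignments = tf.constant([[0.5, 0.5]]) * (1.0 - p_gen)  # 2 source tokens
    source_extend_tokens = tf.constant([[2, 4]])    # second source token is OOV id 4
    vocab_extended = tf.concat([vocab_dist, tf.zeros([1, num_oov])], axis=-1)
    batch_nums = tf.tile(tf.expand_dims(tf.range(1), 1), [1, 2])
    indices = tf.stack((batch_nums, source_extend_tokens), axis=2)
    copy_dist = tf.scatter_nd(indices, alignments, [1, vocab_size + num_oov])
    final = vocab_extended + copy_dist
    with tf.Session() as sess:
        print(sess.run(final))  # still sums to 1.0 over the 6-token extended vocab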
class PointerGeneratorAttentionWrapper(tf.contrib.seq2seq.AttentionWrapper):
def __init__(self, cell,
attention_mechanism,
encoder_func=None, # return [B,D]
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
coverage=False,
name=None,
multi_rnn=False):
super(PointerGeneratorAttentionWrapper, self).__init__(
cell,
attention_mechanism,
attention_layer_size,
alignment_history,
cell_input_fn,
output_attention,
initial_cell_state,
name)
self.coverage = coverage
self.multi_rnn = multi_rnn
self.encoder_func = encoder_func
self.history_inputs = None
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with tf.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: tf.identity(s, name="checked_cell_state"),
cell_state)
return tf.contrib.seq2seq.AttentionWrapperState(
cell_state=cell_state,
time=tf.zeros([], dtype=tf.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
# since we need to read the alignment history several times, set clear_after_read to False
alignment_history=self._item_or_tuple(
tf.TensorArray(dtype=dtype, size=0, clear_after_read=False, dynamic_size=True)
if self._alignment_history else ()
for _ in self._attention_mechanisms))
def set_history(self, history_inputs):
self.history_inputs = history_inputs
def call(self, inputs, state):
"""Perform a step of attention-wrapped RNN.
- Step 1: Mix the `inputs` and previous step's `attention` output via
`cell_input_fn`.
- Step 2: Call the wrapped `cell` with this input and its previous state.
- Step 3: Score the cell's output with `attention_mechanism`.
- Step 4: Calculate the alignments by passing the score through the
`normalizer`.
- Step 5: Calculate the context vector as the inner product between the
alignments and the attention_mechanism's values (memory).
- Step 6: Calculate the attention output by concatenating the cell output
and context through the attention layer (a linear layer with
`attention_layer_size` outputs).
Args:
inputs: (Possibly nested tuple of) Tensor, the input at this time step.
state: An instance of `AttentionWrapperState` containing
tensors from the previous time step.
Returns:
A tuple `(attention_or_cell_output, next_state)`, where:
- `attention_or_cell_output` depending on `output_attention`.
- `next_state` is an instance of `AttentionWrapperState`
containing the state calculated at this time step.
Raises:
TypeError: If `state` is not an instance of `AttentionWrapperState`.
"""
if not isinstance(state, tf.contrib.seq2seq.AttentionWrapperState):
raise TypeError("Expected state to be instance of AttentionWrapperState. "
"Received type %s instead." % type(state))
# Step 1: Calculate the true inputs to the cell based on the
# previous attention value.
cell_inputs = self._cell_input_fn(inputs, state.attention)
cell_state = state.cell_state
# lstm
cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
# other encoder
if self.encoder_func:
encoder_output = self.encoder_func(self.history_inputs)
cell_output = tf.concat([cell_output, encoder_output], axis=-1) # B,d1+d2
cell_batch_size = (
cell_output.shape[0].value or tf.shape(cell_output)[0])
error_message = (
"When applying AttentionWrapper %s: " % self.name +
"Non-matching batch sizes between the memory "
"(encoder output) and the query (decoder output). Are you using "
"the BeamSearchDecoder? You may need to tile your memory input via "
"the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with tf.control_dependencies(self._batch_size_checks(cell_batch_size, error_message)):
cell_output = tf.identity(
cell_output, name="checked_cell_output")
if self._is_multi:
# previous_alignments = state.alignments
previous_attention_state = state.attention_state
previous_alignment_history = state.alignment_history
else:
# previous_alignments = [state.alignments]
previous_attention_state = [state.attention_state]
previous_alignment_history = [state.alignment_history]
all_alignments = []
all_attentions = []
all_histories = []
all_attention_states = []
for i, attention_mechanism in enumerate(self._attention_mechanisms):
# if self.coverage:
# # if we use coverage mode, previous alignments is coverage vector
# # alignment history stack has shape: decoder time * batch * atten_len
# # convert it to coverage vector
# previous_alignments[i] = tf.cond(
# previous_alignment_history[i].size()>0,
# lambda: tf.reduce_sum(tf.transpose(previous_alignment_history[i].stack(),[1,2,0]),axis=2),
# lambda: tf.zeros_like(previous_alignments[i]))
# attention, alignments = _compute_attention(
# attention_mechanism, cell_output, previous_alignments[i],
# self._attention_layers[i] if self._attention_layers else None)
# alignment_history = previous_alignment_history[i].write(
# state.time, alignments) if self._alignment_history else ()
# all_alignments.append(alignments)
# all_histories.append(alignment_history)
# all_attentions.append(attention)
attention, alignments, next_attention_state = _compute_attention(
attention_mechanism, cell_output, previous_attention_state[i],
self._attention_layers[i] if self._attention_layers else None)
alignment_history = previous_alignment_history[i].write(
state.time, alignments) if self._alignment_history else ()
all_attention_states.append(next_attention_state)
all_alignments.append(alignments)
all_histories.append(alignment_history)
all_attentions.append(attention)
attention = tf.concat(all_attentions, 1)
next_state = tf.contrib.seq2seq.AttentionWrapperState(
time=state.time + 1,
cell_state=next_cell_state,
attention=attention,
attention_state=self._item_or_tuple(all_attention_states),
alignments=self._item_or_tuple(all_alignments),
alignment_history=self._item_or_tuple(all_histories))
if self._output_attention:
return attention, next_state
else:
return cell_output, next_state
def _pg_bahdanau_score(processed_query, keys, coverage, coverage_vector):
"""Implements Bahdanau-style (additive) scoring function.
Args:
processed_query: Tensor, shape `[batch_size, num_units]` to compare to keys.
keys: Processed memory, shape `[batch_size, max_time, num_units]`.
coverage: Whether to use coverage mode.
coverage_vector: only used when coverage is true
Returns:
A `[batch_size, max_time]` tensor of unnormalized score values.
"""
dtype = processed_query.dtype
# Get the number of hidden units from the trailing dimension of keys
num_units = keys.shape[2].value or tf.shape(keys)[2]
# Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
wei = tf.layers.dense(processed_query, units=1)
processed_query = tf.expand_dims(processed_query, 1)
v = tf.get_variable(
"attention_v", [num_units], dtype=dtype)
b = tf.get_variable(
"attention_b", [num_units], dtype=dtype,
initializer=tf.zeros_initializer())
simi_score = tf.matmul(keys, processed_query, transpose_b=True)
simi_score = tf.squeeze(simi_score, 2) * wei
if coverage:
w_c = tf.get_variable(
"coverage_w", [num_units], dtype=dtype)
# debug
# coverage_vector = tf.Print(coverage_vector,[coverage_vector],message="score")
coverage_vector = tf.expand_dims(coverage_vector, -1)
return tf.reduce_sum(v * tf.tanh(keys + processed_query + coverage_vector * w_c + b), [2]) + simi_score
else:
return tf.reduce_sum(v * tf.tanh(keys + processed_query + b), [2]) + simi_score
class PointerGeneratorBahdanauAttention(tf.contrib.seq2seq.BahdanauAttention):
def __init__(self,
num_units,
memory,
memory_sequence_length=None,
normalize=False,
coverage=False,
probability_fn=None,
score_mask_value=float("-inf"),
name="PointerGeneratorBahdanauAttention"):
"""Construct the Attention mechanism.
Args:
num_units: The depth of the query mechanism.
memory: The memory to query; usually the output of an RNN encoder. This
tensor should be shaped `[batch_size, max_time, ...]`.
memory_sequence_length (optional): Sequence lengths for the batch entries
in memory. If provided, the memory tensor rows are masked with zeros
for values past the respective sequence lengths.
normalize: Python boolean. Whether to normalize the energy term.
probability_fn: (optional) A `callable`. Converts the score to
probabilities. The default is @{tf.nn.softmax}. Other options include
@{tf.contrib.seq2seq.hardmax} and @{tf.contrib.sparsemax.sparsemax}.
Its signature should be: `probabilities = probability_fn(score)`.
score_mask_value: (optional): The mask value for score before passing into
`probability_fn`. The default is -inf. Only used if
`memory_sequence_length` is not None.
name: Name to use when creating ops.
coverage: whether to use coverage mode
"""
super(PointerGeneratorBahdanauAttention, self).__init__(
num_units=num_units,
memory=memory,
memory_sequence_length=memory_sequence_length,
normalize=normalize,
probability_fn=probability_fn,
score_mask_value=score_mask_value,
name=name)
self.coverage = coverage
def __call__(self, query, state):
"""Score the query based on the keys and values.
Args:
query: Tensor of dtype matching `self.values` and shape
`[batch_size, query_depth]`.
state: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]`
(`alignments_size` is memory's `max_time`); in coverage mode this is the coverage vector.
Returns:
alignments: Tensor of dtype matching `self.values` and shape
`[batch_size, alignments_size]` (`alignments_size` is memory's
`max_time`).
"""
with tf.variable_scope(None, "pointer_generator_bahdanau_attention", [query]):
processed_query = self.query_layer(query) if self.query_layer else query
# score = _pg_bahdanau_score(processed_query, self._keys, self.coverage, previous_alignments)
score = _pg_bahdanau_score(processed_query, self._keys, self.coverage, state)
alignments = self._probability_fn(score, state)
next_state = alignments
return alignments, next_state
| 22,126 | 49.061086 | 158 |
py
|
daanet
|
daanet-master/nlp/seq2seq/common.py
|
# coding=utf-8
import math
import time
import tensorflow as tf
from tensorflow.python.ops.image_ops_impl import ResizeMethod
from tensorflow.python.ops.rnn_cell_impl import LSTMStateTuple
INF = 1e30
def initializer(): return tf.contrib.layers.variance_scaling_initializer(factor=1.0,
mode='FAN_AVG',
uniform=True,
dtype=tf.float32)
def rand_uniform_initializer(
mag): return tf.random_uniform_initializer(-mag, mag, seed=314159)
def truc_norm_initializer(
std): return tf.truncated_normal_initializer(stddev=std)
def initializer_relu(): return tf.contrib.layers.variance_scaling_initializer(factor=2.0,
mode='FAN_IN',
uniform=False,
dtype=tf.float32)
regularizer = tf.contrib.layers.l2_regularizer(scale=3e-7)
def get_var(name, shape, dtype=tf.float32,
initializer_fn=initializer,
regularizer_fn=regularizer, **kwargs):
return tf.get_variable(name, shape,
initializer=initializer_fn,
dtype=dtype,
regularizer=regularizer_fn, **kwargs)
def softmax_mask(val, mask):
return -INF * (1 - tf.cast(mask, tf.float32)) + val
def dropout(args, keep_prob, is_train, mode="recurrent"):
if keep_prob < 1.0:
noise_shape = None
scale = 1.0
shape = tf.shape(args)
if mode == "embedding":
noise_shape = [shape[0], 1]
scale = keep_prob
if mode == "recurrent" and len(args.get_shape().as_list()) == 3:
noise_shape = [shape[0], 1, shape[-1]]
args = tf.cond(is_train, lambda: tf.nn.dropout(
args, keep_prob, noise_shape=noise_shape) * scale, lambda: args)
return args
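# A minimal sketch of the 'recurrent' mode above: noise_shape = [B, 1, D]
# shares one dropout mask across all timesteps (variational dropout) instead
# of resampling the mask per timestep. Values are illustrative.
def _demo_recurrent_dropout():
    x = tf.ones([2, 5, 4])
    y = dropout(x, keep_prob=0.5, is_train=tf.constant(True), mode="recurrent")
    with tf.Session() as sess:
        # each (batch, channel) column is either all zeros or all 2.0
        print(sess.run(y)[0])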
def dense(inputs, hidden_size, use_bias=True, scope=None):
with tf.variable_scope(scope or "dense"):
shape = tf.shape(inputs)
dim = inputs.get_shape().as_list()[-1]
out_shape = [shape[idx] for idx in range(
len(inputs.get_shape().as_list()) - 1)] + [hidden_size]
flat_inputs = tf.reshape(inputs, [-1, dim])
W = tf.get_variable("W", [dim, hidden_size])
res = tf.matmul(flat_inputs, W)
if use_bias:
bias = tf.get_variable(
"bias", [hidden_size], initializer=tf.constant_initializer(0.))
res = tf.nn.bias_add(res, bias)
res = tf.reshape(res, out_shape)
return res
def layer_norm(x, filters=None, epsilon=1e-6, scope=None, reuse=None):
"""Layer normalize the tensor x, averaging over the last dimension."""
if filters is None:
filters = x.get_shape()[-1]
with tf.variable_scope(scope, default_name="layer_norm", values=[x], reuse=reuse):
scale = tf.get_variable(
"layer_norm_scale", [filters], regularizer=regularizer, initializer=tf.ones_initializer())
bias = tf.get_variable(
"layer_norm_bias", [filters], regularizer=regularizer, initializer=tf.zeros_initializer())
result = layer_norm_compute_python(x, epsilon, scale, bias)
return result
def layer_norm_compute_python(x, epsilon, scale, bias):
"""Layer norm raw computation."""
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
return norm_x * scale + bias
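# A minimal check of layer_norm above: along the last axis every feature
# vector is normalized to (approximately) zero mean and unit variance before
# the learned scale (init 1) and bias (init 0) are applied.
def _demo_layer_norm():
    x = tf.random_normal([2, 3, 8]) * 5.0 + 3.0
    y = layer_norm(x, scope='demo_layer_norm')
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(tf.reduce_mean(y, axis=-1)))  # ~0 everywhere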
def get_scope_name():
return tf.get_variable_scope().name.split('/')[0]
def make_var(name, shape, trainable=True):
return tf.get_variable(name, shape,
initializer=initializer(),
dtype=tf.float32,
trainable=trainable,
regularizer=regularizer)
def mblock(scope_name, device_name=None, reuse=None):
def f2(f):
def f2_v(self, *args, **kwargs):
start_t = time.time()
if device_name:
with tf.device(device_name), tf.variable_scope(scope_name, reuse=reuse):
f(self, *args, **kwargs)
else:
with tf.variable_scope(scope_name, reuse=reuse):
f(self, *args, **kwargs)
self.logger.info('%s is build in %.4f secs' %
(scope_name, time.time() - start_t))
return f2_v
return f2
def get_init_state(args, name, q_type, shape):
hinit_embed = make_var('hinit_ebd_' + name, shape)
cinit_embed = make_var('cinit_ebd_' + name, shape)
h_init = tf.expand_dims(
tf.nn.embedding_lookup(hinit_embed, q_type), axis=0)
c_init = tf.expand_dims(
tf.nn.embedding_lookup(cinit_embed, q_type), axis=0)
cell_init_state = {
'lstm': lambda: LSTMStateTuple(c_init, h_init),
'sru': lambda: h_init,
'gru': lambda: h_init,
'rnn': lambda: h_init}[args.cell.replace('bi-', '')]()
return cell_init_state
def highway(x, size=None, activation=tf.nn.relu,
num_layers=2, scope="highway", dropout=0.0, reuse=None):
with tf.variable_scope(scope, reuse):
if size is None:
size = x.shape.as_list()[-1]
else:
x = conv(x, size, name="input_projection", reuse=reuse)
for i in range(num_layers):
T = conv(x, size, bias=True, activation=tf.sigmoid,
name="gate_%d" % i, reuse=reuse)
H = conv(x, size, bias=True, activation=activation,
name="activation_%d" % i, reuse=reuse)
H = tf.nn.dropout(H, 1.0 - dropout)
x = H * T + x * (1.0 - T)
return x
def conv(inputs, output_size, bias=None, activation=None, kernel_size=1, name="conv", reuse=None):
with tf.variable_scope(name, reuse=reuse):
shapes = inputs.shape.as_list()
if len(shapes) > 4:
raise NotImplementedError
elif len(shapes) == 4:
filter_shape = [1, kernel_size, shapes[-1], output_size]
bias_shape = [1, 1, 1, output_size]
strides = [1, 1, 1, 1]
else:
filter_shape = [kernel_size, shapes[-1], output_size]
bias_shape = [1, 1, output_size]
strides = 1
conv_func = tf.nn.conv1d if len(shapes) == 3 else tf.nn.conv2d
kernel_ = tf.get_variable("kernel_",
filter_shape,
dtype=tf.float32,
regularizer=regularizer,
initializer=initializer_relu() if activation is not None else initializer())
outputs = conv_func(inputs, kernel_, strides, "VALID")
if bias:
outputs += tf.get_variable("bias_",
bias_shape,
regularizer=regularizer,
initializer=tf.zeros_initializer())
if activation is not None:
return activation(outputs)
else:
return outputs
def sparse_nll_loss(probs, labels, epsilon=1e-9, scope=None):
"""
negative log likelihood loss
"""
with tf.name_scope(scope, "log_loss"):
labels = tf.one_hot(labels, tf.shape(
probs)[1], axis=1, dtype=tf.float32)
losses = - tf.reduce_sum(labels * tf.log(probs + epsilon), 1)
return losses
def normalize_distribution(p, eps=1e-9):
p += eps
norm = tf.reduce_sum(p, axis=1)
return tf.cast(p, tf.float32) / tf.reshape(norm, (-1, 1))
def kl_divergence(p, q, eps=1e-9):
p = normalize_distribution(p, eps)
q = normalize_distribution(q, eps)
return tf.reduce_sum(p * tf.log(p / q), axis=1)
def get_kl_loss(start_label, start_probs, bandwidth=1.0):
a = tf.reshape(tf.range(tf.shape(start_probs)[1]), (1, -1))
b = tf.reshape(start_label, (-1, 1))
start_true_probs = tf.exp(-tf.cast(tf.squared_difference(a,
b), tf.float32) / bandwidth)
return sym_kl_divergence(start_true_probs, start_probs)
def sym_kl_divergence(p, q, eps=1e-9):
return (kl_divergence(p, q, eps) + kl_divergence(q, p, eps)) / 2.0
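# A small numeric sketch (toy distributions) of kl_divergence and
# sym_kl_divergence above; both operate row-wise on [batch, num_classes].
def _demo_kl():
    p = tf.constant([[0.9, 0.1], [0.5, 0.5]])
    q = tf.constant([[0.5, 0.5], [0.5, 0.5]])
    with tf.Session() as sess:
        print(sess.run(kl_divergence(p, q)))      # approx [0.368, 0.0]
        print(sess.run(sym_kl_divergence(p, q)))  # the symmetrized average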
def get_conv_feature(x, out_dim, window_len, upsampling=False):
a = tf.layers.conv1d(x, out_dim, window_len, strides=max(
int(math.floor(window_len / 2)), 1))
if upsampling:
return upsampling_a2b(a, x, out_dim)
else:
return a
def upsampling_a2b(a, b, D_a):
return tf.squeeze(tf.image.resize_images(tf.expand_dims(a, axis=-1), [tf.shape(b)[1], D_a],
method=ResizeMethod.NEAREST_NEIGHBOR), axis=-1)
| 9,076 | 36.508264 | 110 |
py
|
daanet
|
daanet-master/nlp/seq2seq/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/nlp/seq2seq/rnn.py
|
# coding=utf-8
import tensorflow as tf
import tensorflow.contrib as tc
import gpu_env
from .common import dropout, dense, get_var
def single_rnn_cell(cell_name, num_units, is_train=None, keep_prob=0.75):
"""
Get a single rnn cell
"""
cell_name = cell_name.upper()
if cell_name == "GRU":
cell = tf.contrib.rnn.GRUCell(num_units)
elif cell_name == "LSTM":
cell = tf.contrib.rnn.LSTMCell(num_units)
else:
cell = tf.contrib.rnn.BasicRNNCell(num_units)
# dropout wrapper
if is_train and keep_prob < 1.0:
cell = tf.contrib.rnn.DropoutWrapper(
cell=cell,
input_keep_prob=keep_prob,
output_keep_prob=keep_prob)
return cell
def multi_rnn_cell(cell_name, num_units, is_train=None, keep_prob=1.0, num_layers=3):
cell_name = cell_name.upper()
if cell_name == "GRU":
cells = [tf.contrib.rnn.GRUCell(num_units) for _ in range(num_layers)]
elif cell_name == "LSTM":
cells = [tf.contrib.rnn.LayerNormBasicLSTMCell(num_units) for _ in range(num_layers)]
else:
cells = [tf.contrib.rnn.BasicRNNCell(num_units) for _ in range(num_layers)]
cell = tf.contrib.rnn.MultiRNNCell(cells)
# dropout wrapper
if is_train and keep_prob < 1.0:
cell = tf.contrib.rnn.DropoutWrapper(
cell=cell,
input_keep_prob=keep_prob,
output_keep_prob=keep_prob)
return cell
def get_lstm_init_state(batch_size, num_layers, num_units, direction, scope=None, reuse=None, **kwargs):
with tf.variable_scope(scope or 'lstm_init_state', reuse=reuse):
num_dir = 2 if direction.startswith('bi') else 1
c = get_var('lstm_init_c', shape=[num_layers * num_dir, num_units])
c = tf.tile(tf.expand_dims(c, axis=1), [1, batch_size, 1])
h = get_var('lstm_init_h', shape=[num_layers * num_dir, num_units])
h = tf.tile(tf.expand_dims(h, axis=1), [1, batch_size, 1])
return c, h
def LSTM_encode(seqs, scope=None, reuse=None, **kwargs):
with tf.variable_scope(scope or 'lstm_encode_block', reuse=reuse):
batch_size = tf.shape(seqs)[0]
# to T, B, D
_seqs = tf.transpose(seqs, [1, 0, 2])
lstm = tf.contrib.cudnn_rnn.CudnnLSTM(**kwargs)
init_state = get_lstm_init_state(batch_size, **kwargs)
output, state = lstm(_seqs, init_state)
return tf.transpose(output, [1, 0, 2]), state
def custom_dynamic_rnn(cell, inputs, inputs_len, initial_state=None):
"""
Implements a dynamic rnn that can store scores in the pointer network.
We implement this ourselves because the raw_rnn and dynamic_rnn functions in TensorFlow
seem to require the hidden unit and memory unit to have the same dimension, so we cannot
store the scores directly in the hidden unit.
Args:
cell: RNN cell
inputs: the input sequence to rnn
inputs_len: valid length
initial_state: initial_state of the cell
Returns:
outputs and state
"""
batch_size = tf.shape(inputs)[0]
max_time = tf.shape(inputs)[1]
inputs_ta = tf.TensorArray(dtype=gpu_env.DTYPE_F, size=max_time)
inputs_ta = inputs_ta.unstack(tf.transpose(inputs, [1, 0, 2]))
emit_ta = tf.TensorArray(dtype=gpu_env.DTYPE_F, dynamic_size=True, size=0)
t0 = tf.constant(0, dtype=tf.int32)
if initial_state is not None:
s0 = initial_state
else:
s0 = cell.zero_state(batch_size, dtype=gpu_env.DTYPE_F)
f0 = tf.zeros([batch_size], dtype=tf.bool)
def loop_fn(time, prev_s, emit_ta, finished):
"""
the loop function of rnn
"""
cur_x = inputs_ta.read(time)
scores, cur_state = cell(cur_x, prev_s)
# copy through
scores = tf.where(finished, tf.zeros_like(scores), scores)
if isinstance(cell, tc.rnn.LSTMCell):
cur_c, cur_h = cur_state
prev_c, prev_h = prev_s
cur_state = tc.rnn.LSTMStateTuple(tf.where(finished, prev_c, cur_c),
tf.where(finished, prev_h, cur_h))
else:
cur_state = tf.where(finished, prev_s, cur_state)
emit_ta = emit_ta.write(time, scores)
finished = tf.greater_equal(time + 1, inputs_len)
return [time + 1, cur_state, emit_ta, finished]
_, state, emit_ta, _ = tf.while_loop(
cond=lambda _1, _2, _3, finished: tf.logical_not(
tf.reduce_all(finished)),
body=loop_fn,
loop_vars=(t0, s0, emit_ta, f0),
parallel_iterations=32,
swap_memory=False)
outputs = tf.transpose(emit_ta.stack(), [1, 0, 2])
return outputs, state
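# A minimal usage sketch of custom_dynamic_rnn above (assumes gpu_env.DTYPE_F
# is tf.float32; sizes and names are illustrative).
def _demo_custom_dynamic_rnn():
    cell = tc.rnn.LSTMCell(8)
    inputs = tf.zeros([2, 5, 4])        # B=2, T=5, D=4
    inputs_len = tf.constant([3, 5])    # valid length of each sequence
    outputs, state = custom_dynamic_rnn(cell, inputs, inputs_len)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(outputs).shape)  # (2, 5, 8); steps past the valid length emit zeros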
def reduce_state(fw_state, bw_state, hidden_size, act_fn=tf.nn.relu, scope=None):
# concatenation of the fw and bw cell states
with tf.variable_scope(scope or "reduce_final_state"):
_c = tf.concat([fw_state.c, bw_state.c], axis=1)
_h = tf.concat([fw_state.h, bw_state.h], axis=1)
c = act_fn(dense(_c, hidden_size, use_bias=True, scope="reduce_c"))
h = act_fn(dense(_h, hidden_size, use_bias=True, scope="reduce_h"))
return tc.rnn.LSTMStateTuple(c, h)
class CudaRNN:
def __init__(self, num_layers, num_units, cell_type):
self.num_layers = num_layers
self.num_units = num_units
if cell_type.endswith('gru'):
self.grus = [(tf.contrib.cudnn_rnn.CudnnGRU(1, num_units),
tf.contrib.cudnn_rnn.CudnnGRU(1, num_units)) for _ in range(num_layers)]
elif cell_type.endswith('lstm'):
self.grus = [(tf.contrib.cudnn_rnn.CudnnLSTM(1, num_units),
tf.contrib.cudnn_rnn.CudnnLSTM(1, num_units)) for _ in range(num_layers)]
else:
raise NotImplementedError
def __call__(self, inputs, seq_len, keep_prob=1.0,
is_train=None, init_states=None, concat_layers=True):
outputs = [tf.transpose(inputs, [1, 0, 2])]
batch_size = tf.shape(inputs)[0]
if not init_states:
init_states = []
for layer in range(self.num_layers):
init_fw = tf.tile(tf.Variable(
tf.zeros([1, 1, self.num_units])), [1, batch_size, 1])
init_bw = tf.tile(tf.Variable(
tf.zeros([1, 1, self.num_units])), [1, batch_size, 1])
init_states.append((init_fw, init_bw))
dropout_mask = []
for layer in range(self.num_layers):
input_size_ = inputs.get_shape().as_list(
)[-1] if layer == 0 else 2 * self.num_units
mask_fw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train, mode=None)
mask_bw = dropout(tf.ones([1, batch_size, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train, mode=None)
dropout_mask.append((mask_fw, mask_bw))
for layer in range(self.num_layers):
gru_fw, gru_bw = self.grus[layer]
init_fw, init_bw = init_states[layer]
mask_fw, mask_bw = dropout_mask[layer]
with tf.variable_scope("fw_{}".format(layer)):
out_fw, _ = gru_fw(
outputs[-1] * mask_fw, initial_state=(init_fw,))
with tf.variable_scope("bw_{}".format(layer)):
inputs_bw = tf.reverse_sequence(
outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
out_bw, _ = gru_bw(inputs_bw, initial_state=(init_bw,))
out_bw = tf.reverse_sequence(
out_bw, seq_lengths=seq_len, seq_dim=0, batch_dim=1)
outputs.append(tf.concat([out_fw, out_bw], axis=2))
if concat_layers:
res = tf.concat(outputs[1:], axis=2)
else:
res = outputs[-1]
res = tf.transpose(res, [1, 0, 2])
return res
class native_gru:
def __init__(self, num_layers, num_units, batch_size, input_size, keep_prob=1.0, is_train=None, scope="native_gru"):
self.num_layers = num_layers
self.grus = []
self.inits = []
self.dropout_mask = []
self.scope = scope
for layer in range(num_layers):
input_size_ = input_size if layer == 0 else 2 * num_units
gru_fw = tf.contrib.rnn.GRUCell(num_units)
gru_bw = tf.contrib.rnn.GRUCell(num_units)
init_fw = tf.tile(tf.Variable(
tf.zeros([1, num_units])), [batch_size, 1])
init_bw = tf.tile(tf.Variable(
tf.zeros([1, num_units])), [batch_size, 1])
mask_fw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train, mode=None)
mask_bw = dropout(tf.ones([batch_size, 1, input_size_], dtype=tf.float32),
keep_prob=keep_prob, is_train=is_train, mode=None)
self.grus.append((gru_fw, gru_bw,))
self.inits.append((init_fw, init_bw,))
self.dropout_mask.append((mask_fw, mask_bw,))
def __call__(self, inputs, seq_len, keep_prob=1.0, is_train=None, concat_layers=True):
outputs = [inputs]
with tf.variable_scope(self.scope):
for layer in range(self.num_layers):
gru_fw, gru_bw = self.grus[layer]
init_fw, init_bw = self.inits[layer]
mask_fw, mask_bw = self.dropout_mask[layer]
with tf.variable_scope("fw_{}".format(layer)):
out_fw, _ = tf.nn.dynamic_rnn(
gru_fw, outputs[-1] * mask_fw, seq_len, initial_state=init_fw, dtype=tf.float32)
with tf.variable_scope("bw_{}".format(layer)):
inputs_bw = tf.reverse_sequence(
outputs[-1] * mask_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
out_bw, _ = tf.nn.dynamic_rnn(
gru_bw, inputs_bw, seq_len, initial_state=init_bw, dtype=tf.float32)
out_bw = tf.reverse_sequence(
out_bw, seq_lengths=seq_len, seq_dim=1, batch_dim=0)
outputs.append(tf.concat([out_fw, out_bw], axis=2))
if concat_layers:
res = tf.concat(outputs[1:], axis=2)
else:
res = outputs[-1]
return res
| 10,472 | 40.7251 | 120 |
py
|
daanet
|
daanet-master/daanet/base.py
|
import json
import os
import tensorflow as tf
from base import base_model
from gpu_env import ModeKeys
from model_utils.helper import LossCounter, get_filename
from utils.eval_4.eval import compute_bleu_rouge
from utils.helper import build_model
# model controller
class RCBase(base_model.BaseModel):
def __init__(self, args):
super().__init__(args)
def train(self, mode=ModeKeys.TRAIN):
self.load_embedding()
loss_logger = LossCounter(self.fetch_nodes[mode]['task_loss'].keys(),
log_interval=self.args.log_interval,
batch_size=self.args.batch_size,
tb_writer=self.tb_writer)
pre_metric = {self.args.metric_early_stop: 0.0}
for j in range(self.args.epoch_last + 1, self.args.epoch_last + self.args.epoch_total + 1):
self.logger.info('start train epoch %d ...' % j)
try:
while True:
batch = self.data_io.next_batch(self.args.batch_size, mode)
fetches = self.run_sess_op(batch, mode)
loss_logger.record(fetches)
except EOFError:
self.save(j)
metric = self.restore_evaluate(self.args)
# if metric[self.args.metric_early_stop] < pre_metric[self.args.metric_early_stop]:
# self.logger.info('early stop in epoch %s' % j)
# break
pre_metric = metric
self.logger.info('epoch %d is done!' % j)
def restore_evaluate(self, args):
args.set_hparam('run_mode', ModeKeys.EVAL.value)
args.set_hparam('dropout_keep_prob', 1.0)
graph = tf.Graph()
with graph.as_default():
model = build_model(args, False)
model.restore()
return model.evaluate()
def evaluate(self, epoch=-1, mode=ModeKeys.EVAL):
if epoch < 0:
epoch = self.args.epoch_best if self.args.epoch_best > 0 else self.args.epoch_last
a_pred_dict = {}
a_ref_dict = {}
q_pred_dict = {}
q_ref_dict = {}
try:
while True:
batch = self.data_io.next_batch(self.args.batch_size, mode)
fetches = self.run_sess_op(batch, mode)
batch_a_pred_dict, batch_a_ref_dict, batch_q_pred_dict, batch_q_ref_dict = self.parse_result(batch,
fetches)
a_pred_dict.update(batch_a_pred_dict)
a_ref_dict.update(batch_a_ref_dict)
q_pred_dict.update(batch_q_pred_dict)
q_ref_dict.update(batch_q_ref_dict)
except EOFError:
qa_metric = compute_bleu_rouge(a_pred_dict, a_ref_dict)
qg_metric = compute_bleu_rouge(q_pred_dict, q_ref_dict)
qa_metric['type'] = 'qa'
qg_metric['type'] = 'qg'
self.save_metrics(qa_metric, epoch=epoch)
self.save_metrics(qg_metric, epoch=epoch)
self.save_eval_preds({"pred_dict": a_pred_dict, "ref_dict": a_ref_dict, 'type': "qa"}, epoch=epoch)
self.save_eval_preds({"pred_dict": q_pred_dict, "ref_dict": q_ref_dict, 'type': "qg"}, epoch=epoch)
self.logger.info('evaluation at epoch %d is done!' % epoch)
# self.logger.info(metric)
return qa_metric
def predict(self, inputs, mode=ModeKeys.EVAL):
raise NotImplementedError
def save_eval_preds(self, preds, epoch=-1, mode=ModeKeys.EVAL):
result_file = get_filename(self.args, mode)
with open(result_file, 'w', encoding='utf8') as fp:
json.dump(preds['pred_dict'], fp, ensure_ascii=False, sort_keys=True)
self.logger.info('sample preds')
sample_count = 20
p_type = preds['type']
for qid, pred in preds['pred_dict'].items():
if sample_count < 0:
break
ans = preds['ref_dict'][qid]
if p_type == 'qa':
self.logger.info("qid=%s" % qid)
self.logger.info("answer=%s" % ans)
self.logger.info("pred_answer=%s" % pred)
else:
self.logger.info("qid=%s" % qid)
self.logger.info("question=%s" % ans)
self.logger.info("pred_question=%s" % pred)
sample_count -= 1
def save_metrics(self, metrics, epoch=-1):
# log metrics
for k, v in metrics.items():
if not k.startswith('_'):
if isinstance(v, int):
self.logger.info('%-20s: %d' % (k, v))
elif isinstance(v, float):
self.logger.info('%-20s: %.4f' % (k, v))
else:
self.logger.info('%-20s: %s' % (k, v))
self.logger.info('prediction metric is added to %s' % self.args.out_metric_file)
# save to loss file
if not os.path.isfile(self.args.loss_csv_file):
with open(self.args.loss_csv_file, 'w') as fp:
fp.write('epoch %s\n' % (' '.join(k for k in metrics.keys() if not k.startswith('_'))))
with open(self.args.loss_csv_file, 'a') as fp:
fp.write('%d ' % epoch)
for k, v in metrics.items():
if not k.startswith('_'):
if isinstance(v, int):
fp.write('%d ' % v)
elif isinstance(v, float):
fp.write('%.4f ' % v)
else:
fp.write('%s ' % v)
fp.write('\n')
def parse_result(self, batch, fetches):
def get_pred_and_true(logits, true_tokens, oovs):
pred = []
for tid in logits:
if tid != self.data_io.stop_token_id:
pred.append(self.data_io.vocab.get_token_with_oovs(tid, oovs))
else:
break
pred = " ".join(pred)
true = " ".join(true_tokens)
return pred, true
a_pred_dict = {}
a_ref_dict = {}
q_pred_dict = {}
q_ref_dict = {}
for qid, ans, questions, a_logits, q_logits, oov_tokens in zip(batch['qid'], batch['answer_tokens'],
batch['question_tokens'],
fetches['answer_decoder_logits'],
fetches['question_decoder_logits'],
batch['oovs']):
a_pred, a_true = get_pred_and_true(a_logits, ans, oov_tokens)
a_pred_dict[qid] = [a_pred]
a_ref_dict[qid] = [a_true]
q_pred, q_true = get_pred_and_true(q_logits, questions, oov_tokens)
q_pred_dict[qid] = [q_pred]
q_ref_dict[qid] = [q_true]
return a_pred_dict, a_ref_dict, q_pred_dict, q_ref_dict
| 7,115 | 42.656442 | 117 |
py
|
daanet
|
daanet-master/daanet/basic.py
|
"""Sequence-to-Sequence with attention model.
"""
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
from gpu_env import ModeKeys, SummaryType
from model_utils.helper import mblock
from nlp.encode_blocks import LSTM_encode, CNN_encode
from nlp.match_blocks import dot_attention, Transformer_match
from nlp.nn import get_var, highway_network, linear_logit
from nlp.seq2seq.pointer_generator import PointerGeneratorDecoder, \
PointerGeneratorBahdanauAttention, PointerGeneratorAttentionWrapper
from nlp.seq2seq.rnn import multi_rnn_cell
from .base import RCBase
# import numpy as np
# import seq2seq_lib
class RCCore(RCBase):
def __init__(self, args):
super().__init__(args)
def _build_graph(self):
self._placeholders()
self._shortcuts()
self.loss = -1
self._masks()
self._embed()
self._encode()
self._decode()
if self.args.run_mode == ModeKeys.TRAIN.value:
self._model_loss()
# if self.args.run_mode != 'decode':
@mblock('Input_Layer')
def _placeholders(self):
# passage token input
self.ph_passage = tf.placeholder(tf.int32, [None, None], name="passage")
self.ph_passage_chars = tf.placeholder(tf.int32, [None, None, None], name="passage_chars")
# question token input
self.ph_question = tf.placeholder(tf.int32, [None, None], name="question")
self.ph_question_chars = tf.placeholder(tf.int32, [None, None, None], name="question_chars")
# answer
self.ph_answer = tf.placeholder(tf.int32, [None, None], name="answer") # answer token input
self.ph_answer_chars = tf.placeholder(tf.int32, [None, None, None], name="answer_chars")
# length
self.ph_passage_length = tf.placeholder(tf.int32, [None], name="passage_length")
self.ph_question_length = tf.placeholder(tf.int32, [None], name="question_length")
self.ph_answer_length = tf.placeholder(tf.int32, [None], name="answer_length")
# max number of oov words in this batch
self.ph_max_oov_length = tf.placeholder(
tf.int32,
shape=[],
name='source_oov_words')
# input tokens using source oov words and vocab
self.ph_passage_extend_tokens = tf.placeholder(
tf.int32,
shape=[None, None],
name='source_extend_tokens')
self.ph_q_decode_input = tf.placeholder(
tf.int32, [None, None],
name="question_decode_input")
self.ph_q_decode_target = tf.placeholder(
tf.int32, [None, None],
name="question_decode_target")
self.ph_q_decode_length = tf.placeholder(
tf.int32, [None],
name="question_decode_length")
self.ph_a_decode_input = tf.placeholder(
tf.int32, [None, None],
name="answer_decode_input"
)
self.ph_a_decode_target = tf.placeholder(
tf.int32, [None, None],
name="answer_decode_target"
)
self.ph_a_decode_length = tf.placeholder(
tf.int32, [None],
name="answer_decode_length"
)
self.ph_a_start_label = tf.placeholder(
tf.int32, [None],
name="answer_start_id")
self.ph_a_end_label = tf.placeholder(tf.int32, [None], name="answer_end_id")
if self.args.use_answer_masks:
self.ph_answer_masks = tf.placeholder(
tf.int32, [None, None, None],
name="answer_masks")
self.ph_dropout_keep_prob = tf.placeholder(
tf.float32,
name='dropout_keep_prob')
self.ph_word_emb = tf.placeholder(tf.float32,
[self.pretrain_vocab_size, self.vocab_dim],
name='word_embed_mat')
self.ph_char_emb = tf.placeholder(tf.float32,
[self.char_vocab_size, self.char_vocab_dim],
name='char_embed_mat')
self.ph_tokenid_2_charsid = tf.placeholder(tf.int32,
[self.vocab_size, self.args.max_token_len],
name='ph_tokenid_2_charids')
self.ph_is_train = tf.placeholder(tf.bool, [])
def _shortcuts(self):
self.batch_size = tf.shape(self.ph_passage)[0]
self.max_p_len = tf.shape(self.ph_passage)[1]
self.max_q_len = tf.shape(self.ph_question)[1]
self.max_a_len = tf.shape(self.ph_answer)[1]
self.max_q_char_len = tf.shape(self.ph_question_chars)[2]
self.max_p_char_len = tf.shape(self.ph_passage_chars)[2]
self.max_a_char_len = tf.shape(self.ph_answer_chars)[2]
@mblock('Input_Layer/Mask')
def _masks(self):
self.a_mask = tf.sequence_mask(
self.ph_answer_length, tf.shape(self.ph_answer)[1],
dtype=tf.float32,
name='answer_mask')
self.p_mask = tf.sequence_mask(
self.ph_passage_length, tf.shape(self.ph_passage)[1],
dtype=tf.float32,
name='passage_mask')
self.q_mask = tf.sequence_mask(
self.ph_question_length, tf.shape(self.ph_question)[1],
dtype=tf.float32,
name='question_mask')
self.decode_q_mask = tf.sequence_mask(
self.ph_q_decode_length, tf.shape(self.ph_q_decode_target)[1],
dtype=tf.float32,
name='decode_question_mask')
self.decode_a_mask = tf.sequence_mask(
self.ph_a_decode_length, tf.shape(self.ph_a_decode_target)[1],
dtype=tf.float32,
name='decode_answer_mask'
)
@mblock('Embedding')
def _embed(self):
self.pretrained_word_embeddings = get_var(
'pretrained_word_embeddings',
shape=[self.pretrain_vocab_size, self.vocab_dim],
trainable=self.args.embed_trainable)
self.init_tokens_embeddings = tf.get_variable(name="init_tokens_embeddings",
shape=[self.initial_tokens_size - 1, self.vocab_dim],
initializer=tf.random_normal_initializer())
self.pad_tokens_embeddings = tf.get_variable(name="pad_tokens_embeddings",
shape=[1, self.vocab_dim],
initializer=tf.zeros_initializer(), trainable=False)
self.pretrain_word_embed_init = self.pretrained_word_embeddings.assign(self.ph_word_emb)
# "unk, start end, pad" in the end of embeddings
self.word_embeddings = tf.concat(
[self.pretrained_word_embeddings, self.init_tokens_embeddings, self.pad_tokens_embeddings], axis=0,
name='word_embeddings')
self.word_embeddings = tf.nn.dropout(self.word_embeddings, self.ph_dropout_keep_prob)
self.char_emb = get_var('char_embeddings', shape=[self.char_vocab_size, self.args.char_embed_size],
trainable=True)
self.tokenid_2_charsid_map = tf.get_variable('tokenid_2_charsid_map', dtype=tf.int32,
shape=[self.vocab_size, self.args.max_token_len],
trainable=False, initializer=tf.zeros_initializer())
self.tokenid_2_charsid_map_init = self.tokenid_2_charsid_map.assign(self.ph_tokenid_2_charsid)
with tf.variable_scope("embedding", reuse=tf.AUTO_REUSE) as scope:
self.embedding_scope = scope
def emb_ff(ids):
"""
:param ids: shape of ids is [batch] or [batch,L]
:return: embedding [batch, D] or [batch, L, D]
"""
num_of_dim = ids.get_shape().ndims
if num_of_dim == 1:
ids = tf.reshape(ids, [self.batch_size, 1])
condition = tf.less(ids, self.vocab_size)
ids = tf.where(condition, ids, tf.ones_like(ids) * self.data_io.unk_id)
max_axis1_len = tf.shape(ids)[-1]
char_ids = tf.nn.embedding_lookup(self.tokenid_2_charsid_map, ids) # B,L,max_token_len
max_axis2_len = tf.shape(char_ids)[-1]
token_emb = tf.nn.embedding_lookup(self.word_embeddings, ids)
char_emb = tf.reshape(tf.nn.embedding_lookup(self.char_emb, char_ids), # B,L,max_token_len,D_char
[self.batch_size * max_axis1_len, max_axis2_len, self.args.char_embed_size])
char_emb = CNN_encode(char_emb, filter_size=self.args.embed_filter_size,
num_filters=self.args.char_embed_size, scope=scope, reuse=tf.AUTO_REUSE)
char_emb = tf.reshape(tf.reduce_max(char_emb, axis=1),
[self.batch_size, max_axis1_len, self.args.char_embed_size])
concat_emb = tf.concat([token_emb, char_emb], axis=-1)
concat_emb = linear_logit(concat_emb, self.args.embedding_output_dim, scope=scope, reuse=tf.AUTO_REUSE)
highway_out = highway_network(concat_emb, self.args.highway_layer_num, scope=scope, reuse=tf.AUTO_REUSE)
if num_of_dim == 1:
return tf.squeeze(highway_out, axis=1) # B,D
else:
return highway_out # B,L,D
self.embedding_func = emb_ff
self.p_emb = self.embedding_func(self.ph_passage)
self.q_emb = self.embedding_func(self.ph_question)
self.a_emb = self.embedding_func(self.ph_answer)
@mblock('Encoding')
def _encode(self):
with tf.variable_scope('Passage_Encoder'):
self.p_encodes_rnn = LSTM_encode(self.p_emb, num_layers=self.args.encode_num_layers,
num_units=self.args.encode_num_units, direction=self.args.encode_direction,
scope='p_encode')
all_p_encodes = [self.p_encodes_rnn]
if self.args.self_attention_encode:
self.p_encodes_trans = Transformer_match(self.p_emb, self.p_emb, self.p_mask, self.p_mask,
self.args.self_attention_num_units,
scope='Passage_Encoder_trans')
all_p_encodes.append(self.p_encodes_trans)
if self.args.highway_encode:
self.p_encodes_highway = highway_network(self.p_emb, 1, num_units=self.args.highway_num_units,
scope='Passage_Encoder')
all_p_encodes.append(self.p_encodes_highway)
self.p_encodes = tf.concat(all_p_encodes, -1) * tf.expand_dims(self.p_mask, -1)
with tf.variable_scope("Question_Encoder") as q_encode_scope:
self.q_encodes_rnn = LSTM_encode(self.q_emb, num_layers=self.args.encode_num_layers,
num_units=self.args.encode_num_units, direction=self.args.encode_direction,
scope='q_encode')
all_q_encodes = [self.q_encodes_rnn]
if self.args.self_attention_encode:
self.q_encodes_trans = Transformer_match(self.q_emb, self.q_emb, self.q_mask, self.q_mask,
self.args.self_attention_num_units,
scope=q_encode_scope, layer_norm_scope='2', causality=True)
all_q_encodes.append(self.q_encodes_trans)
if self.args.highway_encode:
self.q_encodes_highway = highway_network(self.q_emb, num_layers=1,
num_units=self.args.highway_num_units, scope=q_encode_scope)
all_q_encodes.append(self.q_encodes_highway)
self.q_encodes = tf.concat(all_q_encodes, -1) * tf.expand_dims(self.q_mask, -1)
def question_encoder_f(inputs):
all_e = []
if self.args.share_transformer_encode:
fake_q_mask = tf.ones(shape=tf.shape(inputs)[:2], dtype=tf.float32)
all_e.append(
Transformer_match(inputs, inputs, fake_q_mask, fake_q_mask, self.args.self_attention_num_units,
scope=q_encode_scope, reuse=True, layer_norm_scope='2', causality=True))
if self.args.share_highway_encode:
all_e.append(highway_network(inputs, 1, num_units=self.args.highway_num_units, scope=q_encode_scope,
reuse=True))
all_e = tf.concat(all_e, axis=-1)
return all_e[:, -1, :] # B,D
self.question_encoder_func = question_encoder_f
with tf.variable_scope("Answer_Encoder") as a_encode_scope:
self.a_encodes_rnn = LSTM_encode(self.a_emb, num_layers=self.args.encode_num_layers,
num_units=self.args.encode_num_units, direction=self.args.encode_direction,
scope='a_encode')
all_a_encodes = [self.a_encodes_rnn]
if self.args.self_attention_encode:
self.a_encodes_trans = Transformer_match(self.a_emb, self.a_emb, self.a_mask, self.a_mask,
self.args.self_attention_num_units,
scope=a_encode_scope, layer_norm_scope='2', causality=True)
all_a_encodes.append(self.a_encodes_trans)
if self.args.highway_encode:
self.a_encodes_highway = highway_network(self.a_emb, 1, num_units=self.args.highway_num_units,
scope=a_encode_scope)
all_a_encodes.append(self.a_encodes_highway)
self.a_encodes = tf.concat(all_a_encodes, -1) * tf.expand_dims(self.a_mask, -1)
def answer_encoder_f(inputs):
all_e = []
if self.args.share_transformer_encode:
fake_q_mask = tf.ones(shape=tf.shape(inputs)[:2], dtype=tf.float32)
all_e.append(
Transformer_match(inputs, inputs, fake_q_mask, fake_q_mask, self.args.self_attention_num_units,
scope=a_encode_scope, reuse=True, layer_norm_scope='2',
causality=True))
if self.args.share_highway_encode:
all_e.append(highway_network(inputs, 1, num_units=self.args.highway_num_units, scope=a_encode_scope,
reuse=True))
enc_res = tf.concat(all_e, -1)
return enc_res[:, -1, :] # B,D
self.answer_encoder_func = answer_encoder_f
self.encode_dim = self.args.encode_num_units * 2
with tf.variable_scope("Question_Passage_Attention"):
self.qp_att = dot_attention(self.q_encodes, self.p_encodes,
mask=self.p_mask,
hidden_size=self.encode_dim,
keep_prob=self.args.dropout_keep_prob,
is_train=self.ph_is_train,
scope="question_attention") # B, LQ, D
with tf.variable_scope("Answer_Passage_Attention"):
self.ap_att = dot_attention(self.a_encodes, self.p_encodes,
mask=self.p_mask,
hidden_size=self.encode_dim,
keep_prob=self.args.dropout_keep_prob,
is_train=self.ph_is_train,
scope="question_attention") # B, LQ, D
with tf.variable_scope("Question_Passage_Encode"):
self.question_encoder_cell = multi_rnn_cell(
"LSTM", self.args.decoder_num_units,
is_train=self.args.run_mode == ModeKeys.TRAIN.value,
keep_prob=self.args.dropout_keep_prob,
num_layers=self.args.lstm_num_layers)
_, self.qp_encoder_state = tf.nn.dynamic_rnn(
cell=self.question_encoder_cell,
inputs=self.qp_att,
sequence_length=self.ph_question_length,
dtype=tf.float32)
self.qp_encoder_outputs = dot_attention(
self.p_encodes, self.qp_att,
mask=self.q_mask,
hidden_size=self.encode_dim,
keep_prob=self.args.dropout_keep_prob,
is_train=self.ph_is_train,
scope="passage_attention")
with tf.variable_scope("Answer_Passage_Encode"):
self.answer_encoder_cell = multi_rnn_cell(
"LSTM", self.args.decoder_num_units,
is_train=self.args.run_mode == ModeKeys.TRAIN.value,
keep_prob=self.args.dropout_keep_prob,
num_layers=self.args.lstm_num_layers)
_, self.ap_encoder_state = tf.nn.dynamic_rnn(
cell=self.answer_encoder_cell,
inputs=self.ap_att,
sequence_length=self.ph_answer_length,
dtype=tf.float32)
self.ap_encoder_outputs = dot_attention(
self.p_encodes, self.ap_att,
mask=self.a_mask,
hidden_size=self.encode_dim,
keep_prob=self.args.dropout_keep_prob,
is_train=self.ph_is_train,
scope="passage_attention")
self.encode_dim = self.args.final_projection_num
self.add_tfboard('passage_encode', self.p_encodes,
SummaryType.HISTOGRAM)
self.add_tfboard('question_encode', self.q_encodes,
SummaryType.HISTOGRAM)
self.add_tfboard('qp_encoder_state', self.qp_encoder_state,
SummaryType.HISTOGRAM)
self.add_tfboard('qp_encoder_outputs', self.qp_encoder_outputs,
SummaryType.HISTOGRAM)
self.add_tfboard('ap_encoder_state', self.ap_encoder_state,
SummaryType.HISTOGRAM)
self.add_tfboard('ap_encoder_outputs', self.ap_encoder_outputs,
SummaryType.HISTOGRAM)
@mblock('Decoder')
def _decode(self):
vsize = self.vocab_size
with tf.variable_scope("decoder_output"):
projection_layer = layers_core.Dense(units=vsize, use_bias=False) # use_bias
answer_decoder_cell, answer_initial_state = self.get_decode_cell_state(self.qp_encoder_outputs,
self.qp_encoder_state,
encoder_func=self.answer_encoder_func,
scope="answer_decoder_cell_state")
question_decoder_cell, question_initial_state = self.get_decode_cell_state(self.ap_encoder_outputs,
self.ap_encoder_state,
encoder_func=self.question_encoder_func,
scope="question_decoder_cell_state")
if self.args.run_mode == ModeKeys.TRAIN.value:
# answer decoder
answer_training_decoder = self.get_training_decoder(self.ph_a_decode_input, self.ph_a_decode_length,
answer_decoder_cell, answer_initial_state,
projection_layer,
embedding_func=self.embedding_func,
scope="answer_training_decoder")
question_training_decoder = self.get_training_decoder(self.ph_q_decode_input, self.ph_q_decode_length,
question_decoder_cell, question_initial_state,
projection_layer,
embedding_func=self.embedding_func,
scope="question_training_decoder")
# Training decoding
# answer
self.answer_decoder_outputs, self.answer_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=answer_training_decoder,
impute_finished=True,
scope="answer_decoder")
self.answer_decoder_logits = self.answer_decoder_outputs.rnn_output
self.add_fetch('answer_decoder_logits', self.answer_decoder_logits, [ModeKeys.TRAIN])
# question
self.question_decoder_outputs, self.question_decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=question_training_decoder,
impute_finished=True,
scope="question_decoder")
self.question_decoder_logits = self.question_decoder_outputs.rnn_output
self.add_fetch('question_decoder_logits', self.question_decoder_logits, [ModeKeys.TRAIN])
else:
answer_inference_decoder = self.get_inference_decoder(answer_decoder_cell, answer_initial_state,
projection_layer,
embedding_func=self.embedding_func,
scope="answer_inference_decoder")
question_inference_decoder = self.get_inference_decoder(question_decoder_cell, question_initial_state,
projection_layer,
embedding_func=self.embedding_func,
scope="question_inference_decoder")
# Inference Decoding
# Answer
self.answer_decoder_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=answer_inference_decoder,
maximum_iterations=100,
impute_finished=False,
scope="answer_decoder")
self.answer_decoder_logits = self.answer_decoder_outputs.sample_id # B, L
self.add_fetch('answer_decoder_logits', self.answer_decoder_logits, [ModeKeys.DECODE, ModeKeys.EVAL])
# Question
self.question_decoder_outputs, _, _ = tf.contrib.seq2seq.dynamic_decode(
decoder=question_inference_decoder,
maximum_iterations=100,
impute_finished=False,
scope="question_decoder")
self.question_decoder_logits = self.question_decoder_outputs.sample_id # B, L
self.add_fetch('question_decoder_logits', self.question_decoder_logits,
[ModeKeys.DECODE, ModeKeys.EVAL])
@mblock('Loss')
def _model_loss(self):
qa_loss = self._loss_calc_helper(self.ph_a_decode_length, self.ph_a_decode_target, self.decode_a_mask,
self.answer_decoder_logits, self.answer_decoder_state)
qg_loss = self._loss_calc_helper(self.ph_q_decode_length, self.ph_q_decode_target, self.decode_q_mask,
self.question_decoder_logits, self.question_decoder_state)
if self.args.task_name == 'qa':
self.loss = qa_loss
elif self.args.task_name == 'qg':
self.loss = qg_loss
else:
self.loss = qa_loss + qg_loss
self._loss['loss'] = self.loss
self._loss['qa_loss'] = qa_loss
self._loss['qg_loss'] = qg_loss
def load_embedding(self):
if self.args.embed_use_pretrained and not self.embed_loaded:
self.sess.run([self.pretrain_word_embed_init, self.tokenid_2_charsid_map_init],
feed_dict={
self.ph_word_emb: self.data_io.vocab.pretrained_embeddings,
self.ph_tokenid_2_charsid: self.data_io.tokenid2charsid,
})
self.embed_loaded = True
def get_tfboard_vars(self):
return {
SummaryType.HISTOGRAM: [
('answer_decoder_logits', self.answer_decoder_logits),
('question_decoder_logits', self.question_decoder_logits),
]
}
def get_training_decoder(self, decoder_inputs, decoder_length, decoder_cell, train_initial_state, projection_layer,
embedding_func, scope='training_decoder', reuse=False):
with tf.variable_scope(scope, reuse=reuse):
decoder_embedding_inputs = embedding_func(decoder_inputs)
training_helper = tf.contrib.seq2seq.TrainingHelper(decoder_embedding_inputs, decoder_length)
training_decoder = PointerGeneratorDecoder(
source_extend_tokens=self.ph_passage_extend_tokens,
source_oov_words=self.ph_max_oov_length,
coverage=self.args.use_coverage,
cell=decoder_cell,
helper=training_helper,
initial_state=train_initial_state,
output_layer=projection_layer)
return training_decoder
def get_inference_decoder(self, decoder_cell, train_initial_state, projection_layer, embedding_func,
scope='inference_decoder', reuse=False):
with tf.variable_scope(scope, reuse=reuse):
start_tokens = tf.tile(
tf.constant([self.data_io.start_token_id],
dtype=tf.int32), [self.batch_size])
            # using a greedy decoder for now
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(
embedding=embedding_func,
start_tokens=start_tokens,
end_token=self.data_io.stop_token_id)
inference_decoder = PointerGeneratorDecoder(
source_extend_tokens=self.ph_passage_extend_tokens,
source_oov_words=self.ph_max_oov_length,
coverage=self.args.use_coverage,
cell=decoder_cell,
helper=helper,
initial_state=train_initial_state,
output_layer=projection_layer)
return inference_decoder
def get_decode_cell_state(self, encoder_output, encoder_state, encoder_func=None, scope='decode_cell_state',
reuse=False):
with tf.variable_scope(scope, reuse=reuse):
_cell = multi_rnn_cell(
"LSTM", self.args.decoder_num_units,
is_train=self.args.run_mode == ModeKeys.TRAIN.value,
keep_prob=self.args.dropout_keep_prob,
num_layers=self.args.lstm_num_layers)
enc_lengths = self.ph_passage_length
attention_mechanism = PointerGeneratorBahdanauAttention(
self.encode_dim, encoder_output,
memory_sequence_length=enc_lengths,
coverage=self.args.use_coverage)
decoder_cell = PointerGeneratorAttentionWrapper(
cell=_cell,
encoder_func=encoder_func,
attention_mechanism=attention_mechanism,
attention_layer_size=self.args.decoder_num_units,
alignment_history=True,
coverage=self.args.use_coverage)
initial_state = decoder_cell.zero_state(self.batch_size, tf.float32)
initial_state = initial_state.clone(cell_state=encoder_state)
return decoder_cell, initial_state
def _loss_calc_helper(self, decode_length, decode_target, decode_mask, decoder_logits, decoder_state):
max_dec_len = tf.reduce_max(
decode_length,
name="max_dec_len")
# targets: [batch_size x max_dec_len]
# this is important, because we may have padded endings
targets = tf.slice(decode_target, [
0, 0
], [-1, max_dec_len], 'targets')
i1, i2 = tf.meshgrid(tf.range(self.batch_size),
tf.range(max_dec_len),
indexing="ij")
indices = tf.stack((i1, i2, targets), axis=2)
probs = tf.gather_nd(decoder_logits, indices)
        # to keep padding tokens from getting probability 0 (and hence inf when
        # calculating log(p)), clamp the probabilities from below; note that
        # inf * 0 = nan, which once caused hard-to-debug nan losses here
        probs = tf.where(tf.less_equal(probs, 0),
                         tf.ones_like(probs) * 1e-10, probs)
crossent = -tf.log(probs)
loss = tf.reduce_sum(
crossent * decode_mask) / tf.to_float(self.batch_size)
if self.args.use_coverage:
# we got all the alignments from last state
# shape is: batch * atten_len * max_len
alignment_history = tf.transpose(decoder_state.alignment_history.stack(), [1, 2, 0])
coverage_loss = tf.minimum(alignment_history, tf.cumsum(
alignment_history,
axis=2,
exclusive=True))
coverage_loss = self.args.coverage_loss_weight * \
tf.reduce_sum(coverage_loss / tf.to_float(self.batch_size))
loss += coverage_loss
return loss
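# ---------------------------------------------------------------------------
# Illustration (added for clarity; not part of the original model): a minimal
# numpy sketch of the indexing trick used in _loss_calc_helper above. For each
# (batch, step) pair we pick the probability the decoder assigned to the gold
# target token; the meshgrid builds the (batch, step) grid and the target ids
# supply the vocabulary axis, mirroring tf.gather_nd(decoder_logits, indices).
def _demo_gather_target_probs():
    import numpy as np
    batch_size, max_len, vocab = 2, 3, 5
    logits = np.random.rand(batch_size, max_len, vocab)  # stands in for decoder_logits
    targets = np.array([[1, 4, 0], [2, 2, 3]])           # gold token ids, shape [B, L]
    i1, i2 = np.meshgrid(np.arange(batch_size), np.arange(max_len), indexing='ij')
    return logits[i1, i2, targets]                       # per-token gold probs, shape [B, L]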
| 30,506 | 51.238014 | 127 |
py
|
daanet
|
daanet-master/base/base_model.py
|
import importlib
import json
import logging
import os
from collections import defaultdict
from math import ceil
import numpy as np
import tensorflow as tf
from ruamel.yaml import YAML
from gpu_env import ModeKeys, APP_NAME, SummaryType
from model_utils.helper import mblock, partial_restore, sample_element_from_var
class BaseModel:
def __init__(self, args):
self.logger = logging.getLogger(APP_NAME)
self.args = args
self.train_op = None
self.ema = None
self.is_var_ema = False
self.fetch_nodes = defaultdict(lambda: defaultdict(int))
self.monitored_non_vars = []
self.sess = None
self.loss = None
self._loss = {} # other auxiliary loss
self.embed_loaded = False
dataio = importlib.import_module(args.package_dataio)
self.data_io = dataio.DataIO(args)
self.vocab_size = self.data_io.vocab.size()
self.pretrain_vocab_size = self.data_io.vocab.pretraind_size()
self.initial_tokens_size = self.data_io.vocab.initial_tokens_size()
self.vocab_dim = self.data_io.vocab.embed_dim
try:
self.char_vocab_size = self.data_io.char_vocab.size()
self.char_vocab_dim = self.data_io.char_vocab.embed_dim
        except AttributeError:
            # the data io may not provide a char vocab
            self.char_vocab_size, self.char_vocab_dim = None, None
self._build_graph()
if self.args.run_mode == ModeKeys.TRAIN.value:
self._init_train_op()
self._init_tensorboard()
self._set_fetches()
self.init_session()
self.write_num_pars()
if self.args.run_mode == ModeKeys.TRAIN.value:
self.is_graph_valid()
def _build_graph(self):
raise NotImplementedError
def _set_learning_rate(self):
self.global_step = tf.get_variable('global_step', shape=[], dtype=tf.int32,
initializer=tf.constant_initializer(0),
trainable=False)
if self.args.learning_rate_strategy == 'FIXED':
self.lr = tf.minimum(self.args.learning_rate,
self.args.learning_rate / tf.log(999.) * tf.log(
tf.cast(self.global_step, tf.float32) + 1))
elif self.args.learning_rate_strategy == 'HALF_COSINE_MAX':
# from snapshot paper
t_m = tf.constant(ceil(self.args.learning_rate_reset_epoch * self.args.num_total_samples /
self.args.batch_size), dtype=tf.int32)
self.lr = (self.args.learning_rate / 2.0) * (
tf.cos(tf.constant(3.1415, tf.float32) *
tf.cast(tf.mod(self.global_step, t_m), tf.float32)
/ tf.cast(t_m, tf.float32)) + 1.0)
elif self.args.learning_rate_strategy == 'HALF_COSINE_ZERO':
# from snapshot paper
t_m = tf.constant(ceil(self.args.learning_rate_reset_epoch * self.args.num_total_samples /
self.args.batch_size), dtype=tf.int32)
self.lr = (self.args.learning_rate / 2.0) * (1.0 -
tf.cos(tf.constant(3.1415, tf.float32) *
tf.cast(tf.mod(self.global_step, t_m), tf.float32)
/ tf.cast(t_m, tf.float32)))
elif self.args.learning_rate_strategy == 'COSINE_ZERO':
t_m = tf.constant(ceil(self.args.learning_rate_reset_epoch * self.args.num_total_samples /
self.args.batch_size), dtype=tf.int32)
self.lr = (self.args.learning_rate / 2.0) * (1.0 -
tf.cos(tf.constant(2 * 3.1415, tf.float32) *
tf.cast(tf.mod(self.global_step, t_m), tf.float32)
/ tf.cast(t_m, tf.float32)))
elif self.args.learning_rate_strategy == 'COSINE_MAX':
t_m = tf.constant(ceil(self.args.learning_rate_reset_epoch * self.args.num_total_samples /
self.args.batch_size), dtype=tf.int32)
self.lr = (self.args.learning_rate / 2.0) * (1.0 +
tf.cos(tf.constant(2 * 3.1415, tf.float32) *
tf.cast(tf.mod(self.global_step, t_m), tf.float32)
/ tf.cast(t_m, tf.float32)))
elif self.args.learning_rate_strategy == 'COSINE_ZERO_DECAY':
t_m = tf.constant(ceil(self.args.learning_rate_reset_epoch * self.args.num_total_samples /
self.args.batch_size), dtype=tf.int32)
            # decay the peak rate by the restart-cycle index: lr / (ceil(step / t_m) + 1)
            self.lr = (self.args.learning_rate /
                       (tf.ceil(tf.cast(self.global_step, tf.float32) / tf.cast(t_m, tf.float32)) + 1)) \
                      * (1.0 - tf.cos(tf.constant(2 * 3.1415, tf.float32) *
                                      tf.cast(tf.mod(self.global_step, t_m), tf.float32)
                                      / tf.cast(t_m, tf.float32)))
elif self.args.learning_rate_strategy in ['CYCLE_LINEAR', 'CYCLE_SIN']:
self.lr = tf.get_variable('lr', shape=[], dtype=tf.float32,
initializer=tf.constant_initializer(self.args.learning_rate),
trainable=False)
else:
raise NotImplementedError
def _set_fetches(self):
self.add_fetch('loss', self.loss, [ModeKeys.TRAIN])
self.add_fetch('task_loss', self._loss, [ModeKeys.TRAIN])
self.add_fetch('_train_op', self.train_op, ModeKeys.TRAIN)
self.add_fetch('global_step', self.global_step, ModeKeys.TRAIN)
self.add_fetch('learning_rate', self.lr, ModeKeys.TRAIN)
if self.args.enable_tensorboard:
self.add_fetch('merged_summary', tf.summary.merge_all(), [ModeKeys.TRAIN])
def add_tfboard(self, name, value, mode):
if self.args.enable_tensorboard:
if isinstance(mode, list):
for m in mode:
self.add_tfboard(name, value, m)
elif mode == SummaryType.SCALAR:
tf.summary.scalar(name, value)
elif mode == SummaryType.HISTOGRAM:
tf.summary.histogram(name, value)
elif mode == SummaryType.SAMPLED:
self.monitored_non_vars.append(value)
else:
raise NotImplementedError
def add_fetch(self, name, value, mode):
if isinstance(mode, list):
for m in mode:
self.fetch_nodes[m][name] = value
elif isinstance(mode, ModeKeys):
self.fetch_nodes[mode][name] = value
else:
raise AttributeError('mode must be a list of ModeKeys or a ModeKeys!')
def get_fetch(self, name, mode):
return self.fetch_nodes[mode][name]
def init_session(self):
# session info
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.intra_op_parallelism_threads = 10
config.inter_op_parallelism_threads = 10
self.sess = tf.Session(config=config)
# initialize the model
self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver(max_to_keep=self.args.saver_max_to_keep)
self.tb_writer = tf.summary.FileWriter(self.args.summary_dir, self.sess.graph) if \
self.args.enable_tensorboard else None
def write_num_pars(self):
get_num_pars = lambda x: sum(list(map(np.prod, self.sess.run([tf.shape(v) for v in x]))))
total_num_pars = get_num_pars(tf.trainable_variables())
total_trained = 0
group_by_scope = defaultdict(list)
for v in tf.trainable_variables():
vscope = v.name.split('/')[0]
group_by_scope[vscope].append(v)
for k, v in group_by_scope.items():
n = get_num_pars(v)
if k in self.args.fixed_layers:
self.logger.info('%s%20s : %d' % ('|F|', k, n))
else:
self.logger.info('%s%20s : %d' % ('|V|', k, n))
total_trained += n
self.logger.info('trainable parameters: %d total: %d' % (total_trained, total_num_pars))
if 'num_parameters' not in self.args:
self.args.add_hparam('num_parameters', int(total_num_pars))
def is_graph_valid(self):
for v in [self.sess, self.saver, self.loss,
self.train_op, self.args, self.logger,
self.fetch_nodes, self.data_io]:
assert v is not None, '%s must be initialized' % v
self.logger.info('graph passed sanity check!')
def batch2feed_dict(self, batch, mode):
# add task-specific learning rate to batch
batch.update({'ph_dropout_keep_prob': self.args.dropout_keep_params if mode == ModeKeys.TRAIN else 1.0})
success_binds = []
ignored_binds = []
feed_dict = {}
allv = vars(self)
for k, v in batch.items():
if k in allv and isinstance(allv[k], tf.Tensor):
feed_dict[allv[k]] = v
success_binds.append(k)
else:
ignored_binds.append(k)
# self.logger.info('success bindings: %s' % success_binds)
# self.logger.warning('ignored bindings: %s' % ignored_binds)
return feed_dict
def run_sess_op(self, batch, mode):
feed_dict = self.batch2feed_dict(batch, mode)
return self.sess.run(self.fetch_nodes[mode], feed_dict)
def is_effective_epoch(self, metric, last_metric, best_metric):
is_effective = 0
for k, v in metric.items():
if v >= best_metric[k]:
is_effective += 1
if self.args.early_stop_metric in metric:
is_key_metric_effective = (metric[self.args.early_stop_metric] > best_metric[self.args.early_stop_metric])
else:
is_key_metric_effective = False
self.logger.info('%d/%d effective metrics! effective %s: %s' % (is_effective, len(last_metric),
self.args.early_stop_metric,
is_key_metric_effective))
return is_effective >= (len(best_metric) / 2) or is_key_metric_effective
def load_embedding(self):
raise NotImplementedError
def get_tfboard_vars(self):
raise NotImplementedError
def train(self, mode=ModeKeys.TRAIN):
"""
code example:
self.load_embedding()
loss_logger = xxxLoger()
for j in range(self.args.epoch_last, self.args.epoch_last + self.args.epoch_total + 1):
self.logger.info('start train epoch %d ...' % j)
try:
while True:
batch = self.data_io.next_batch(self.args.batch_size, mode)
fetches = self.run_sess_op(batch, mode)
loss_logger.record(fetches)
except EOFError:
self.logger.info('epoch %d is done!' % j)
metrics = self.evaluate(epoch=j)
cur_metric = metrics[self.args.metric_early_stop]
if not loss_logger.is_overfitted(cur_metric):
self.save(epoch=j)
else:
self.logger.info('early stop due to overfitting: %s %.4f -> %.4f' % (
self.args.metric_early_stop, loss_logger._last_metric, cur_metric))
break
"""
raise NotImplementedError
def predict(self, inputs, mode=ModeKeys.EVAL):
"""
inputs : dict
code example:
batch = self.data_io.single2batch(context, question)
fetches = self.run_sess_op(batch, mode)
s_id, e_id, raw = fetches['start_pos'][0], fetches['end_pos'][0], batch['raw'][0]
return ' '.join(raw['context'][s_id: (e_id + 1)])
"""
raise NotImplementedError
def evaluate(self, epoch=-1, mode=ModeKeys.EVAL):
"""
code example:
if epoch < 0:
epoch = self.args.epoch_best if self.args.epoch_best > 0 else self.args.epoch_last
preds = {}
try:
while True:
batch = self.data_io.next_batch(self.args.batch_size, mode)
fetches = self.run_sess_op(batch, mode)
# process fetches result
except EOFError:
self.logger.info('evaluation at epoch %d is done!' % epoch)
metric = self.save_eval_preds(preds, epoch=epoch)
return metric
"""
raise NotImplementedError
def save_eval_preds(self, preds, epoch=-1, mode=ModeKeys.EVAL):
"""
save eval result to files
code example:
result_file = get_filename(self.args, mode)
with open(result_file, 'w', encoding='utf8') as fp:
json.dump(preds, fp, ensure_ascii=False, sort_keys=True)
self.logger.info('prediction saved to %s' % result_file)
opt = namedtuple('OPT', 'id epoch pred ref out_file '
'na_prob_file na_prob_thresh out_image_dir verbose')(
self.args.model_id, epoch, result_file, self.args.dev_files[0],
self.args.out_metric_file, None, 1.0, None, True)
# evaluate predict result
# metrics = do_evaluation(opt)
# log metrics
for k, v in metrics.items():
if not k.startswith('_'):
if isinstance(v, int):
self.logger.info('%-20s: %d' % (k, v))
elif isinstance(v, float):
self.logger.info('%-20s: %.4f' % (k, v))
else:
self.logger.info('%-20s: %s' % (k, v))
self.logger.info('prediction metric is added to %s' % self.args.out_metric_file)
# save to loss file
if not os.path.isfile(self.args.loss_csv_file):
with open(self.args.loss_csv_file, 'w') as fp:
fp.write('epoch %s\n' % (' '.join(k for k in metrics.keys() if not k.startswith('_'))))
with open(self.args.loss_csv_file, 'a') as fp:
fp.write('%d ' % epoch)
for k, v in metrics.items():
if not k.startswith('_'):
if isinstance(v, int):
fp.write('%d ' % v)
elif isinstance(v, float):
fp.write('%.4f ' % v)
else:
fp.write('%s ' % v)
fp.write('\n')
return metrics
"""
raise NotImplementedError
def save_for_serving(self, epoch):
raise NotImplementedError
def save(self, epoch):
self.save_model(epoch)
self.args.epoch_last = epoch
self.save_args()
def save_model(self, epoch, save='save'):
self.saver.save(self.sess, os.path.join(self.args.save_dir, 'epoch%d' % epoch))
# tf.train.write_graph(self.sess.graph.as_graph_def(), FLAGS.save, "graph.pb")
if self.args.save_for_serving:
self.save_for_serving(epoch)
self.logger.info('model %sd in %s, at epoch %d' % (save, self.args.save_dir, epoch))
def save_args(self):
with open(os.path.join(self.args.save_dir, 'default.yaml'), 'w') as fp:
YAML().dump(json.loads(self.args.to_json()), fp)
def restore(self, epoch=-1, use_ema=True, use_partial_loader=False):
if epoch < 0:
epoch = self.args.epoch_best if self.args.epoch_best > 0 else self.args.epoch_last
model_file = os.path.join(self.args.save_dir, 'epoch%d' % epoch)
if use_partial_loader:
partial_restore(self.sess, model_file)
self.logger.info('partial restore variables without EMA!')
else:
if (self.ema is None) or (not use_ema):
self.saver.restore(self.sess, model_file)
self.is_var_ema = False
self.logger.info('restore variables without EMA!')
else:
variables_to_restore = self.ema.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
saver.restore(self.sess, model_file)
self.is_var_ema = True
self.logger.info('EMA variables are restored!')
self.logger.info('model restored from %s, at epoch %d' % (self.args.save_dir, epoch))
@mblock('Train_Op')
def _init_train_op(self):
"""
Selects the training algorithm and creates a train operation with it
"""
self._set_learning_rate()
all_params = [v for v in tf.trainable_variables() if v.name.split('/')[0] not in self.args.fixed_layers]
if not all_params:
self.train_op = tf.no_op()
self.ema = tf.train.ExponentialMovingAverage(decay=self.args.ema_decay)
            self.logger.warning('no trainable variables! performing no_op during training')
return
with tf.name_scope('Regularization_Layer'):
if self.args.weight_decay > 0:
# ref. https://stats.stackexchange.com/questions/29130/
# difference-between-neural-net-weight-decay-and-learning-rate
with tf.variable_scope('l2_loss'):
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in all_params])
self.loss += self.args.weight_decay * l2_loss
if self.args.optim == 'ADAM':
optimizer = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=0.8, beta2=0.999, epsilon=1e-7)
elif self.args.optim == 'SGD':
optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr)
else:
optimizer = {
'RMSP': tf.train.RMSPropOptimizer,
'ADAGRAD': tf.train.AdagradOptimizer,
}[self.args.optim](learning_rate=self.args.learning_rate, epsilon=1e-8)
if self.args.gradient_clip:
# Calculate and clip gradients
gradients = tf.gradients(self.loss, all_params)
clipped_gradients, _ = tf.clip_by_global_norm(gradients, self.args.gradient_max_norm)
train_op = optimizer.apply_gradients(zip(clipped_gradients, all_params),
global_step=self.global_step)
else:
train_op = optimizer.minimize(self.loss, global_step=self.global_step)
if self.args.ema_decay > 0:
self.ema = tf.train.ExponentialMovingAverage(decay=self.args.ema_decay)
with tf.control_dependencies([train_op]):
train_op_ema = self.ema.apply(all_params)
self.train_op = train_op_ema
self.logger.info('EMA is added to training op!')
else:
self.train_op = train_op
self.all_trainable_vars = all_params
def _init_tensorboard(self):
if self.args.enable_tensorboard:
with tf.name_scope('Basic'):
self.add_tfboard('learning_rate', self.lr, SummaryType.SCALAR)
self.add_tfboard('loss', self.loss, SummaryType.SCALAR)
for k, v in self._loss.items():
self.add_tfboard('auxiliary_loss/%s' % k, v, SummaryType.SCALAR)
with tf.name_scope('Sampled_Vars'):
sampled = sample_element_from_var(self.all_trainable_vars)
for k, v in sampled.items():
self.add_tfboard(k, v, SummaryType.SCALAR)
with tf.name_scope('Sampled_NonVars'):
sampled = sample_element_from_var(self.monitored_non_vars)
for k, v in sampled.items():
self.add_tfboard(k, v, SummaryType.SCALAR)
other_vars = self.get_tfboard_vars()
if other_vars is not None:
for kk, vv in other_vars.items():
for k in vv:
self.add_tfboard('/'.join([k[1].name.split('/')[0], k[0]]), k[1], kk)
def reset(self):
reset_params = [v for v in tf.trainable_variables() if v.name.split('/')[0] in
self.args.reset_restored_layers]
if reset_params:
total_reset_num_par = sum(list(map(np.prod, self.sess.run([tf.shape(v) for v in reset_params]))))
self.logger.info('resetting %d parameters from %s layers' %
(total_reset_num_par, ','.join(s for s in
self.args.reset_restored_layers)))
self.sess.run(tf.variables_initializer(reset_params))
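# ---------------------------------------------------------------------------
# Illustration (added; not part of the original class): the snapshot-style
# cosine schedules in _set_learning_rate are easier to read in numpy. A sketch
# of HALF_COSINE_MAX, assuming t_m steps per restart cycle -- the rate starts
# at learning_rate and decays towards 0 before every warm restart:
def _demo_half_cosine_max(learning_rate=0.1, t_m=1000, steps=3000):
    import numpy as np
    g = np.arange(steps)
    return (learning_rate / 2.0) * (np.cos(np.pi * (g % t_m) / t_m) + 1.0)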
| 20,931 | 43.918455 | 118 |
py
|
daanet
|
daanet-master/base/base_io.py
|
import logging
from typing import List
from dataio_utils.helper import build_vocab
from gpu_env import APP_NAME, ModeKeys
class BaseDataIO:
def __init__(self, args):
self.args = args
self.logger = logging.getLogger(APP_NAME)
self.vocab = build_vocab(args.word_embedding_files)
self.pad_id = self.vocab.get_id(self.vocab.pad_token)
self.unk_id = self.vocab.get_id(self.vocab.unk_token)
self.start_token_id = self.vocab.get_id(self.vocab.start_token)
self.stop_token_id = self.vocab.get_id(self.vocab.stop_token)
self.datasets = {}
def next_batch(self, batch_size: int, mode: ModeKeys):
raise NotImplementedError
def load_data(self, file_paths: List[str], mode: ModeKeys):
raise NotImplementedError
def make_mini_batch(self, data):
raise NotImplementedError
| 866 | 31.111111 | 71 |
py
|
daanet
|
daanet-master/dataio_utils/helper.py
|
import copy
import json
import random
import re
def _tokenize(x):
tokens = [v for v in re.findall(r"\w+|[^\w]", x, re.UNICODE) if len(v)] # fix last hanging space
token_shifts = []
char_token_map = []
c, j = 0, 0
for v in tokens:
if v.strip():
token_shifts.append(j)
j += 1
else:
token_shifts.append(-1)
char_token_map += [token_shifts[-1]] * len(v)
# remove empty word and extra space in tokens
tokens = [v.strip() for v in tokens if v.strip()]
assert len(tokens) == max(char_token_map) + 1, \
'num tokens must equal to the max char_token_map, but %d vs %d' % (len(tokens), max(char_token_map))
assert len(char_token_map) == len(x), \
'length of char_token_map must equal to original string, but %d vs %d' % (len(char_token_map), len(x))
return tokens, char_token_map
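# Example (added for clarity): _tokenize also returns a char->token map so an
# answer span given as character offsets can be recovered as a token span:
def _demo_tokenize():
    tokens, char_map = _tokenize('ab cd')
    assert tokens == ['ab', 'cd']
    assert char_map == [0, 0, -1, 1, 1]  # the space maps to -1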
def _char_token_start_end(char_start, answer_text, char_token_map, full_tokens=None):
# to get the tokens use [start: (end+1)]
start_id = char_token_map[char_start]
end_id = char_token_map[char_start + len(answer_text) - 1]
if full_tokens:
ans = ' '.join(full_tokens[start_id: (end_id + 1)])
ans_gold = ' '.join(_tokenize(answer_text)[0])
assert ans == ans_gold, 'answers are not identical "%s" vs "%s"' % (ans, ans_gold)
return start_id, end_id
def _dump_to_json(sample):
return json.dumps(sample).encode()
def _load_from_json(batch):
return [json.loads(d) for d in batch]
def _parse_line(line):
return json.loads(line.strip())
def _do_padding(token_ids, token_lengths, pad_id):
pad_len = max(token_lengths)
return [(ids + [pad_id] * (pad_len - len(ids)))[: pad_len] for ids in token_ids]
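# Example (added): pads every id list up to the longest one in the batch,
#   _do_padding([[1, 2], [3]], [2, 1], 0) -> [[1, 2], [3, 0]]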
def _do_char_padding(char_ids, token_lengths, pad_id, char_pad_id):
pad_token_len = max(token_lengths)
pad_char_len = max(len(xx) for x in char_ids for xx in x)
pad_empty_token = [char_pad_id] * pad_char_len
return [[(ids + [pad_id] * (pad_char_len - len(ids)))[: pad_char_len] for ids in x] +
[pad_empty_token] * (pad_token_len - len(x)) for x in char_ids]
def _dropout_word(x, unk_id, dropout_keep_prob):
return [v if random.random() < dropout_keep_prob else unk_id for v in x]
def _fast_copy(x, ignore_keys):
y = {}
for k, v in x.items():
if k in ignore_keys:
y[k] = v
else:
y[k] = copy.deepcopy(v)
return y
def build_vocab(embd_files):
from utils.vocab import Vocab
if embd_files[0].endswith('pickle'):
return Vocab.load_from_pickle(embd_files[0])
return Vocab(embd_files, lower=True)
| 2,653 | 30.975904 | 110 |
py
|
daanet
|
daanet-master/dataio_utils/full_load_io.py
|
import random
from base import base_io
from gpu_env import ModeKeys
class DataIO(base_io.BaseDataIO):
def __init__(self, args):
super().__init__(args)
if args.is_serving:
self.logger.info('model is serving request, ignoring train & dev sets!')
else:
self.datasets = {
ModeKeys.TRAIN: self.load_data(self.args.train_files, ModeKeys.TRAIN),
ModeKeys.EVAL: self.load_data(self.args.dev_files, ModeKeys.EVAL),
}
self.data_pointer = {k: 0 for k in self.datasets.keys()}
self.post_process_fn = {
ModeKeys.TRAIN: self.post_process_train,
ModeKeys.EVAL: self.post_process_eval,
}
self.reset_pointer(ModeKeys.TRAIN, shuffle=True)
def reset_pointer(self, mode, shuffle=False):
self.data_pointer[mode] = 0
if shuffle:
random.shuffle(self.datasets[mode])
self.logger.info('%s data is shuffled' % mode.name)
def next_batch(self, batch_size: int, mode: ModeKeys):
batch = []
dataset = self.datasets[mode]
start_pointer = self.data_pointer[mode]
batch_data = dataset[start_pointer: (start_pointer + batch_size)]
if len(batch_data) == 0:
self.reset_pointer(mode, shuffle=(mode == ModeKeys.TRAIN))
raise EOFError('%s data is exhausted' % mode.name)
for sample in batch_data:
batch.append(self.post_process_fn[mode](sample))
start_pointer += 1
self.data_pointer[mode] = start_pointer
return self.make_mini_batch(batch)
def post_process_train(self, sample):
"""
# this is important! otherwise you always overwrite the samples
new_sample = copy.deepcopy(sample)
# process new sample
# for example, shuffle dropout.
return new_sample
"""
raise NotImplementedError
def post_process_eval(self, sample):
return sample
| 2,018 | 34.421053 | 86 |
py
|
daanet
|
daanet-master/dataio_utils/flow_io.py
|
import json
from typing import List
import tensorflow as tf
from base import base_io
from gpu_env import ModeKeys
# dataio controller
class FlowDataIO(base_io.BaseDataIO):
def __init__(self, args):
super().__init__(args)
if args.is_serving:
self.logger.info('model is serving request, ignoring train & dev sets!')
else:
self.datasets = {
ModeKeys.TRAIN: self.load_data(self.args.train_files, ModeKeys.TRAIN),
ModeKeys.EVAL: self.load_data(self.args.dev_files, ModeKeys.EVAL),
}
if 'test_files' in self.args:
self.datasets[ModeKeys.TEST] = self.load_data(self.args.test_files, ModeKeys.TEST)
self.data_node = {}
    def make_node(self, mode: ModeKeys):
        for k, v in self.datasets.items():
            if k == mode:
                # build the iterator once; creating a fresh one-shot iterator
                # for every batch would restart the dataset from its first element
                self.data_node[k] = v.make_one_shot_iterator()
    def next_batch(self, batch_size: int, mode: ModeKeys):
        return self.data_node[mode].get_next()
def load_data(self, file_paths: List[str], mode: ModeKeys):
dataset = tf.data.TextLineDataset(file_paths) \
.shuffle(5000) \
.filter(lambda x: tf.py_func(self._filter_invalid_seq, [x], tf.bool)) \
.map(lambda x: tf.py_func(self.make_sample, [x], tf.string)) \
.batch(self.args.batch_size) \
.map(lambda x: tf.py_func(self.make_mini_batch, [x], tf.string)) \
.prefetch(self.args.batch_size * 5)
self.logger.info('loading data for %s' % mode.name)
return dataset
def _dump_to_json(self, sample):
try:
r = json.dumps(sample).encode()
except Exception as e:
print(e)
r = json.dumps({}).encode()
return r
def _load_from_json(self, batch):
return [json.loads(str(d, encoding='utf8')) for d in batch]
def _filter_invalid_seq(self, line):
raise NotImplementedError
def make_sample(self, line, mode=ModeKeys.TRAIN):
raise NotImplementedError
def make_mini_batch(self, data, mode=ModeKeys.TRAIN):
raise NotImplementedError
def single2batch(self, context):
raise NotImplementedError
| 2,238 | 31.449275 | 98 |
py
|
daanet
|
daanet-master/dataio_utils/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/utils/predictor.py
|
import os
import re
import sys
import traceback
import tensorflow as tf
from gpu_env import MODEL_ID
MODEL_PATH = './ext'
print(sys.path)
class Predictor:
def __init__(self, batch_size=32):
self.batch_size = batch_size
print("Loading model..., please wait!", flush=True)
self.models, self.graphs, self.model_ids = load_models(MODEL_PATH)
print("Load finished!", flush=True)
def predict(self, model_id, inputs):
"""
model_id: model dir name
content : str
questions: list
"""
        answer = "no matching model"  # returned unchanged if no model id matches
for i, mid in enumerate(self.model_ids):
if mid == model_id:
model = self.models[i]
answer = model.predict(inputs)
break
return answer
def import_class(import_str):
mod_str, _sep, class_str = import_str.rpartition('.')
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, cur_dir)
__import__(mod_str)
sys.path.remove(cur_dir)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def delete_module(modname):
from sys import modules
del_keys = []
for mod_key, mod_value in modules.items():
if modname in mod_key:
del_keys.append(mod_key)
elif modname in str(mod_value):
del_keys.append(mod_key)
for key in del_keys:
del modules[key]
def load_models(save_path):
model_ids = list_models(save_path)
cur_dir = os.path.dirname(os.path.abspath(__file__))
print('all available models: %s' % model_ids, flush=True)
all_models = []
all_graphs = [tf.Graph() for _ in range(len(model_ids))]
for m, g in zip(model_ids, all_graphs):
yaml_path = os.path.join(save_path, m, '%s.yaml' % m)
args = parse_args(yaml_path, MODEL_ID)
# args.del_hparam('is_serving')
        if args.get('is_serving') is None:
            args.add_hparam('is_serving', True)
        else:
            args.set_hparam('is_serving', True)
with g.as_default():
model_path = cur_dir + '/%s' % (m)
sys.path.insert(0, model_path)
dync_build = import_class("%s.utils.helper.build_model" % (m))
model = dync_build(args, reset_graph=False)
model.restore()
all_models.append(model)
print('model %s is loaded!' % m, flush=True)
sys.path.remove(model_path)
delete_module(m)
return all_models, all_graphs, model_ids
def parse_args(yaml_path, model_id):
from tensorflow.contrib.training import HParams
from ruamel.yaml import YAML
hparams = HParams()
hparams.add_hparam('model_id', model_id)
with open(yaml_path) as fp:
customized = YAML().load(fp)
for k, v in customized.items():
if k in hparams:
hparams.set_hparam(k, v)
else:
hparams.add_hparam(k, v)
return hparams
def list_models(save_path):
model_ids = list(filter(lambda x: os.path.isdir(os.path.join(save_path, x))
and bool(re.match('[0-9]*-[0-9]*', x)), os.listdir(save_path)))
return model_ids
| 3,379 | 29.178571 | 101 |
py
|
daanet
|
daanet-master/utils/helper.py
|
import importlib
import logging
import math
import os
import re
import shutil
import subprocess
import sys
import time
import traceback
from collections import defaultdict
from random import shuffle
import GPUtil
import tensorflow as tf
from ruamel.yaml import YAML
from ruamel.yaml.comments import CommentedMap
from tensorflow.contrib.training import HParams
from tensorflow.python.ops.image_ops_impl import ResizeMethod
from gpu_env import APP_NAME, DEVICE_ID, IGNORE_PATTERNS
millnames = ['', ' K', ' M', ' BL', ' TL']
regex_title_source = re.compile(r'^([^_\-—]*).*?[_\-—]\s?([^_\-—]+)[\s_\-—]?$')
def set_logger(model_id=None):
logger = logging.getLogger(APP_NAME)
logger.setLevel(logging.INFO)
if model_id:
formatter = logging.Formatter(
'%(levelname)-.1s:' + model_id + ':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s', datefmt=
'%m-%d %H:%M:%S')
else:
formatter = logging.Formatter(
'%(levelname)-.1s:[%(filename)s:%(lineno)d]:%(message)s', datefmt=
'%m-%d %H:%M:%S')
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(formatter)
logger.handlers = []
logger.addHandler(console_handler)
return logger
def touch(fname: str, times=None, create_dirs: bool = False):
import os
if create_dirs:
base_dir = os.path.dirname(fname)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
with open(fname, 'a'):
os.utime(fname, times)
def touch_dir(base_dir: str) -> None:
import os
if not os.path.exists(base_dir):
os.makedirs(base_dir)
def millify(n):
n = float(n)
millidx = max(0, min(len(millnames) - 1,
int(math.floor(0 if n == 0 else math.log10(abs(n)) / 3))))
return '{:.0f}{}'.format(n / 10 ** (3 * millidx), millnames[millidx])
def args2hparam(args, vocab):
params = vars(args)
params['vocab'] = vocab
p = HParams()
for k, v in params.items():
p.add_hparam(k, v)
return p
def runner(main, *done):
logger = logging.getLogger(APP_NAME)
try:
main()
except (tf.errors.OutOfRangeError, IndexError) as e:
logger.warning('Data has been exhausted! Done!')
finally:
[f() for f in done]
def parse_yaml(yaml_path, model_id):
from tensorflow.contrib.training import HParams
from ruamel.yaml import YAML
hparams = HParams()
hparams.add_hparam('model_id', model_id)
with open(yaml_path) as fp:
customized = YAML().load(fp)
for k, v in customized.items():
if k in hparams:
hparams.set_hparam(k, v)
else:
hparams.add_hparam(k, v)
return hparams
def parse_args(yaml_path, model_id, default_set, followup=None):
logger = logging.getLogger(APP_NAME)
hparams = HParams()
hparams.add_hparam('model_id', model_id)
with open('default.yaml') as fp:
configs = YAML().load(fp)
default_cfg = configs[default_set]
add_param_recur(hparams, default_cfg)
if yaml_path:
logger.info('loading parameters...')
with open(yaml_path) as fp:
customized = YAML().load(fp)
for k, v in customized.items():
if k in hparams and hparams.get(k) != v:
logger.info('%20s: %20s -> %20s' % (k, hparams.get(k), v))
hparams.set_hparam(k, v)
elif k not in hparams: # add new parameter
hparams.add_hparam(k, v)
logger.info('%30s %20s: %20s' % ("[add from %s]" % yaml_path, k, hparams.get(k)))
if followup:
# useful when changing args for prediction
logger.info('override args with follow-up args...')
for k, v in followup.items():
if k in hparams and hparams.get(k) != v:
logger.info('%20s: %20s -> %20s' % (k, hparams.get(k), v))
hparams.set_hparam(k, v)
elif k not in hparams:
logger.warning('%s is not a valid attribute! ignore!' % k)
if 'save_dir' not in hparams:
hparams.add_hparam('save_dir', os.path.join(hparams.get('model_dir'), hparams.get('model_id')))
if 'code_dir' not in hparams:
hparams.add_hparam('code_dir', os.path.join(hparams.get('save_dir'), 'code'))
hparams.set_hparam('summary_dir', os.path.join(hparams.get('save_dir'), 'summary'))
# reset logger model id
logger = set_logger(model_id='%s:%s' % (DEVICE_ID, hparams.get('model_id')))
try:
shutil.copytree('./', hparams.get('code_dir'), ignore=shutil.ignore_patterns(*IGNORE_PATTERNS))
logger.info('current code base is copied to %s' % hparams.get('save_dir'))
except FileExistsError:
logger.info('code base exist, no need to copy!')
# if hparams.get('model_id') != model_id:
# logger.warning('model id is changed %s -> %s! '
# 'This happens when you train a pretrained model' % (
# hparams.get('model_id'), model_id))
# hparams.set_hparam('model_id', model_id)
if 'loss_csv_file' not in hparams:
hparams.add_hparam('loss_csv_file', os.path.join(hparams.get('save_dir'), 'loss.csv'))
if 'is_serving' not in hparams:
hparams.add_hparam('is_serving', False)
logger.info('current parameters')
for k, v in sorted(vars(hparams).items()):
if not k.startswith('_'):
logger.info('%20s = %-20s' % (k, v))
return hparams
def add_param_recur(root, p_tree):
for k, v in p_tree.items():
if isinstance(v, CommentedMap):
new_node = HParams()
add_param_recur(new_node, v)
root.add_hparam(k, new_node)
else:
root.add_hparam(k, v)
def fill_gpu_jobs(all_jobs, logger, job_parser,
wait_until_next=300, retry_delay=300, do_shuffle=False):
if do_shuffle:
shuffle(all_jobs)
all_procs = []
while all_jobs:
logger.info('number of jobs in the queue: %d' % len(all_jobs))
j = all_jobs.pop()
logger.info('will start the job: %s ...' % job_parser(j))
try:
GPUtil.getFirstAvailable()
# check if there is a free GPU!
process = subprocess.Popen(job_parser(j), shell=True)
all_procs.append((process, j))
time.sleep(wait_until_next)
except FileNotFoundError:
logger.warning('there is no gpu, running on cpu!')
process = subprocess.Popen(job_parser(j), shell=True)
all_procs.append((process, j))
except RuntimeError as e:
logger.error(str(e))
logger.warning('all gpus are busy! waiting for a free slot...')
# add job back
all_jobs.append(j)
time.sleep(retry_delay)
exit_codes = [(p.wait(), j) for p, j in all_procs]
return [v for p, v in exit_codes if p != 0]
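# Usage sketch (added; the job dict format here is hypothetical):
#   jobs = [{'yaml': 'exp1.yaml'}, {'yaml': 'exp2.yaml'}]
#   failed = fill_gpu_jobs(jobs, logger, lambda j: 'python app.py %s' % j['yaml'])
# each job is launched once GPUtil reports a free GPU; jobs that exit non-zero
# are returned so they can be requeued.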
def get_args_cli(args):
d = defaultdict(list)
if args:
for k, v in ((k.lstrip('-'), v) for k, v in (a.split('=') for a in args)):
d[k].append(v)
for k, v in d.items():
parsed_v = [s for s in (parse_arg(vv) for vv in v) if s is not None]
if len(parsed_v) > 1:
d[k] = parsed_v
if len(parsed_v) == 1:
d[k] = parsed_v[0]
return d
def parse_arg(v: str):
    if v.startswith('[') and v.endswith(']'):
        # parse a bracketed list, e.g. '[1, 2]' -> [1, 2]
        tmp = v.replace('[', '').replace(']', '').strip().split(',')
        if tmp == ['']:
            # '[]' splits to [''], which would otherwise become [None]
            return []
        return [parse_arg(vv.strip()) for vv in tmp]
try:
v = int(v) # parse int parameter
except ValueError:
try:
v = float(v) # parse float parameter
except ValueError:
if len(v) == 0:
# ignore it when the parameter is empty
v = None
elif v.lower() == 'true': # parse boolean parameter
v = True
elif v.lower() == 'false':
v = False
return v
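# Examples (added): parse_arg maps CLI strings to python values,
#   parse_arg('3') -> 3, parse_arg('3.5') -> 3.5, parse_arg('true') -> True,
#   parse_arg('[1, 2]') -> [1, 2], parse_arg('') -> None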
def get_scope_name():
return tf.get_variable_scope().name.split('/')[0]
def sparse_nll_loss(probs, labels, epsilon=1e-9, scope=None):
"""
negative log likelihood loss
"""
with tf.name_scope(scope, "log_loss"):
labels = tf.one_hot(labels, tf.shape(probs)[1], axis=1, dtype=tf.float32)
losses = - tf.reduce_sum(labels * tf.log(probs + epsilon), 1)
return losses
def normalize_distribution(p, eps=1e-9):
p += eps
norm = tf.reduce_sum(p, axis=1)
return tf.cast(p, tf.float32) / tf.reshape(norm, (-1, 1))
def kl_divergence(p, q, eps=1e-9):
p = normalize_distribution(p, eps)
q = normalize_distribution(q, eps)
return tf.reduce_sum(p * tf.log(p / q), axis=1)
def get_kl_loss(start_label, start_probs, bandwidth=1.0):
a = tf.reshape(tf.range(tf.shape(start_probs)[1]), (1, -1))
b = tf.reshape(start_label, (-1, 1))
start_true_probs = tf.exp(-tf.cast(tf.squared_difference(a, b), tf.float32) / bandwidth)
return sym_kl_divergence(start_true_probs, start_probs)
def sym_kl_divergence(p, q, eps=1e-9):
return (kl_divergence(p, q, eps) + kl_divergence(q, p, eps)) / 2.0
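# Note (added): kl_divergence computes KL(p||q) = sum_i p_i * log(p_i / q_i)
# row-wise (after re-normalizing both arguments), and get_kl_loss first turns
# the hard start_label into a soft Gaussian target exp(-(i - label)^2 / bw)
# before taking the symmetric KL against the predicted start distribution.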
def get_conv1d(x, out_dim, window_len, name, act_fn):
return tf.layers.conv1d(x, out_dim, window_len, strides=1, padding='SAME', name=name, activation=act_fn)
def upsampling_a2b(a, b, D_a):
return tf.squeeze(tf.image.resize_images(tf.expand_dims(a, axis=-1), [tf.shape(b)[1], D_a],
method=ResizeMethod.NEAREST_NEIGHBOR), axis=-1)
def dropout(args, keep_prob, is_train, mode="recurrent"):
if keep_prob < 1.0:
noise_shape = None
scale = 1.0
shape = tf.shape(args)
if mode == "embedding":
noise_shape = [shape[0], 1]
scale = keep_prob
if mode == "recurrent" and len(args.get_shape().as_list()) == 3:
noise_shape = [shape[0], 1, shape[-1]]
args = tf.cond(is_train, lambda: tf.nn.dropout(
args, keep_prob, noise_shape=noise_shape) * scale, lambda: args)
return args
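# Note (added): the noise_shape arguments implement shared dropout masks:
# "recurrent" uses [batch, 1, dim] so one mask is reused across all time steps
# (variational dropout), while "embedding" uses [batch, 1] to drop a whole
# vector at once; its extra * keep_prob cancels the inverted scaling that
# tf.nn.dropout applies.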
def get_tmp_yaml(par, prefix=None):
import tempfile
with tempfile.NamedTemporaryFile('w', delete=False, prefix=prefix) as tmp:
YAML().dump(par, tmp)
return tmp.name
def build_model(args, reset_graph=True):
rccore = importlib.import_module(args.package_rccore)
if reset_graph:
tf.reset_default_graph()
return rccore.RCCore(args)
def get_last_output(output, sequence_length, name):
"""Get the last value of the returned output of an RNN.
http://disq.us/p/1gjkgdr
output: [batch x number of steps x ... ] Output of the dynamic lstm.
sequence_length: [batch] Length of each of the sequence.
"""
rng = tf.range(0, tf.shape(sequence_length)[0])
indexes = tf.stack([rng, sequence_length - 1], 1)
return tf.gather_nd(output, indexes, name)
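# Example (added): for output of shape [2, 3, d] and sequence_length [2, 3],
# indexes becomes [[0, 1], [1, 2]] -- the last valid step of each sequence.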
def import_class(import_str):
mod_str, _sep, class_str = import_str.rpartition('.')
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, cur_dir)
__import__(mod_str)
sys.path.remove(cur_dir)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' %
(class_str,
traceback.format_exception(*sys.exc_info())))
def delete_module(modname):
from sys import modules
del_keys = []
for mod_key, mod_value in modules.items():
if modname in mod_key:
del_keys.append(mod_key)
elif modname in str(mod_value):
del_keys.append(mod_key)
for key in del_keys:
del modules[key]
| 11,978 | 32 | 113 |
py
|
daanet
|
daanet-master/utils/vocab.py
|
import logging
import pickle
import numpy as np
from gpu_env import APP_NAME
class Vocab:
@staticmethod
def load_from_pickle(fp):
with open(fp, 'rb') as fin:
return pickle.load(fin)
def __init__(self, embedding_files, lower=True):
self.logger = logging.getLogger(APP_NAME)
self.id2token = {}
self.token2id = {}
self.lower = lower
        # the pretrained vocab may already contain <unk>, so name our own unk token <_unk_> to avoid a clash
self.pad_token = '<_pad_>'
self.unk_token = '<_unk_>'
self.start_token = '<_start_>'
self.stop_token = '<_stop_>'
self.initial_tokens = [self.unk_token, self.start_token, self.stop_token, self.pad_token]
self.embed_dim = 0
self.embeddings = None
        self.pretrained_embeddings = None  # holds the pretrained vectors
self.initial_tokens_embedding = None
if embedding_files is not None:
for w in embedding_files:
self.load_pretrained(w)
def size(self):
return len(self.id2token)
def pretraind_size(self):
if self.pretrained_embeddings is not None:
return self.pretrained_embeddings.shape[0]
return 0
def initial_tokens_size(self):
return len(self.initial_tokens)
def get_id(self, token, fallback_chars=False):
token = token.lower() if self.lower else token
if fallback_chars:
return self.token2id.get(token, self.token2id[self.unk_token] if len(token) == 1 else [self.get_id(c) for
c in token])
else:
return self.token2id.get(token, self.token2id[self.unk_token])
def get_token(self, idx):
return self.id2token.get(idx, self.unk_token)
def get_token_with_oovs(self, idx, oovs):
token = self.get_token(idx)
if idx >= self.size() and oovs is not None:
idx = idx - self.size()
try:
token = oovs[idx]
            except IndexError:
                # extended id falls outside this sample's OOV list
                token = self.unk_token
return token
def add(self, token):
"""
adds the token to vocab
Args:
token: a string
cnt: a num indicating the count of the token to add, default is 1
"""
token = token.lower() if self.lower else token
if token in self.token2id:
idx = self.token2id[token]
else:
idx = len(self.id2token)
self.id2token[idx] = token
self.token2id[token] = idx
return idx
def load_pretrained(self, embedding_path):
self.logger.info('loading word embedding from %s' % embedding_path)
trained_embeddings = {}
num_line = 0
valid_dim = None
with open(embedding_path, 'r', encoding='utf8') as fin:
for line in fin:
contents = line.strip().split()
if len(contents) == 0:
continue
token = contents[0]
num_line += 1
if valid_dim and len(contents) != valid_dim + 1:
self.logger.debug('bad line: %d in embed files!' % num_line)
continue
trained_embeddings[token] = list(map(float, contents[1:]))
if valid_dim is None:
valid_dim = len(contents) - 1
# rebuild the token x id map
if not self.token2id:
self.logger.info('building token-id map...')
for token in trained_embeddings.keys():
self.add(token)
            for token in self.initial_tokens:  # initial tokens are appended after the pretrained ones
if token in trained_embeddings:
raise NameError('initial tokens "%s" in pretraind embedding!' % token)
self.add(token)
else:
self.logger.info('use existing token-id map')
# load inits tokens
self.initial_tokens_embedding = np.zeros([len(self.initial_tokens), valid_dim])
# load pretrained embeddings
embeddings = np.zeros([len(trained_embeddings), valid_dim])
for token in trained_embeddings.keys():
embeddings[self.get_id(token)] = trained_embeddings[token]
        self.pretrained_embeddings = embeddings  # keep the pretrained vectors separately
        self.embeddings = np.concatenate([embeddings, self.initial_tokens_embedding], axis=0)  # full embedding matrix
self.embed_dim = self.embeddings.shape[1]
self.logger.info('size of embedding %d x %d' % (self.embeddings.shape[0], self.embeddings.shape[1]))
self.logger.info('size of pretrain embedding %d x %d' % (
self.pretrained_embeddings.shape[0], self.pretrained_embeddings.shape[1]))
# self.logger.info('first 3 lines: %s', embeddings[2:5, :])
# self.logger.info('last 3 lines: %s', embeddings[-3:, :])
def tokens2ids(self, tokens):
return [self.get_id(x) for x in tokens]
    def tokens2ids_with_oovs(self, tokens, init_oovs=None, dynamic_oovs=True):
        ids = []
        ids_with_oov = []
        oov_dict = {}
        if not dynamic_oovs:
            oov_dict = {v: i for i, v in enumerate(init_oovs or [])}
        for x in tokens:
            token_id = self.get_id(x)
            ids.append(token_id)
            lx = x.lower()
            if token_id == self.get_id(self.unk_token):
                if lx in oov_dict:
                    token_id = self.size() + oov_dict[lx]
                elif dynamic_oovs:
                    oov_dict[lx] = len(oov_dict)
                    token_id = self.size() + oov_dict[lx]
            ids_with_oov.append(token_id)
        oovs = [0] * len(oov_dict)
        for k, v in oov_dict.items():
            oovs[v] = k
        return ids, ids_with_oov, oovs
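# ---------------------------------------------------------------------------
# Illustration (added): how tokens2ids_with_oovs supports a pointer-generator
# copy mechanism. If the vocab has 50000 entries and 'frobnicate' is the first
# OOV encountered in a passage, then for that token:
#   ids          -> [..., unk_id, ...]      # plain lookup falls back to unk
#   ids_with_oov -> [..., 50000 + 0, ...]   # extended id the decoder can copy
#   oovs         -> ['frobnicate']          # per-sample OOV list
# get_token_with_oovs later maps the extended id back through this list.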
| 5,793 | 34.987578 | 117 |
py
|
daanet
|
daanet-master/utils/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/utils/eval_4/eval.py
|
from .bleu_metric.bleu import Bleu
from .exact_f1.exact_f1 import f1_exact_eval
from .meteor.meter import compute_meter_score
from .rouge_metric.rouge import Rouge
def normalize(s):
"""
Normalize strings to space joined chars.
Args:
s: a list of strings.
Returns:
A list of normalized strings.
"""
if not s:
return s
normalized = []
for ss in s:
tokens = [c for c in list(ss) if len(c.strip()) != 0]
normalized.append(' '.join(tokens))
return normalized
def compute_bleu_rouge(pred_dict, ref_dict, bleu_order=4):
"""
Compute bleu and rouge scores.
"""
assert set(pred_dict.keys()) == set(ref_dict.keys()), \
"missing keys: {}".format(set(ref_dict.keys()) - set(pred_dict.keys()))
scores = {}
bleu_scores, _ = Bleu(bleu_order).compute_score(ref_dict, pred_dict)
for i, bleu_score in enumerate(bleu_scores):
scores['Bleu-%d' % (i + 1)] = bleu_score
rouge_score, _ = Rouge().compute_score(ref_dict, pred_dict)
scores['Rouge-L'] = rouge_score
f1_exact = f1_exact_eval()
pred_list, ref_list = [], []
for k in pred_dict.keys():
pred_list.append(pred_dict[k][0])
ref_list.append(ref_dict[k][0])
f1_score, exact_score = f1_exact.compute_scores(pred_list, ref_list)
meter_score = compute_meter_score(pred_list, ref_list)
scores['f1'] = f1_score
scores['exact'] = exact_score
scores['meter'] = meter_score
return scores
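# Usage sketch (added):
#   pred_dict = {'q1': ['the cat sat']}
#   ref_dict = {'q1': ['the cat sat on the mat']}
#   scores = compute_bleu_rouge(pred_dict, ref_dict)
# scores then holds 'Bleu-1'..'Bleu-4', 'Rouge-L', 'f1', 'exact' and 'meter'.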
| 1,497 | 28.96 | 79 |
py
|
daanet
|
daanet-master/utils/eval_4/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/utils/eval_4/meteor/__init__.py
|
__author__ = 'larryjfyan'
| 26 | 12.5 | 25 |
py
|
daanet
|
daanet-master/utils/eval_4/meteor/meter.py
|
import os
def compute_meter_score(pred, ref):
cwd = os.path.dirname(__file__)
test_path = '{}/test'.format(cwd)
ref_path = '{}/reference'.format(cwd)
jar_path = '{}/meteor-1.5.jar'.format(cwd)
save_path = '{}/res.txt'.format(cwd)
with open(test_path, 'w') as f:
f.write('\n'.join(pred))
with open(ref_path, 'w') as f:
f.write('\n'.join(ref))
os.system('java -Xmx2G -jar {} {} {} -l en -norm > {}'.format(jar_path, test_path, ref_path, save_path))
    try:
        score = open(save_path).read().split('\n')[-2]
        return float(score.split(' ')[-1])
    except (IOError, IndexError, ValueError):
        # the meteor jar failed to run or produced no parsable score
        return 0.0
| 635 | 30.8 | 108 |
py
|
daanet
|
daanet-master/utils/eval_4/exact_f1/exact_f1.py
|
"""Official evaluation script for SQuAD version 2.0.
In addition to basic functionality, we also compute additional statistics and
plot precision-recall curves if an additional na_prob.json file is provided.
This file is expected to map question ID's to the model's predicted probability
that a question is unanswerable.
"""
import collections
import re
import string
import numpy as np
class f1_exact_eval:
def __init__(self):
self.eval_exact = True
self.eval_f1 = True
def normalize_answer(self, s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def get_tokens(self, s):
        if not s:
            return []
        return self.normalize_answer(s).split()
def compute_exact(self, a_gold, a_pred):
return int(self.normalize_answer(a_gold) == self.normalize_answer(a_pred))
def compute_f1(self, a_gold, a_pred):
gold_toks = self.get_tokens(a_gold)
pred_toks = self.get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def compute_scores(self, res, ref):
assert (type(res) == list)
assert (type(ref) == list)
assert (len(res) == len(ref))
all_f1, all_exact = [], []
        # res holds the predictions, ref the gold answers
        for a_pred, a_gold in zip(res, ref):
            all_f1.append(self.compute_f1(a_gold, a_pred))
            all_exact.append(self.compute_exact(a_gold, a_pred))
return np.mean(all_f1), np.mean(all_exact)
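# Worked example (added): with gold 'the brown dog' and prediction
# 'brown dog barked', normalize_answer drops the article, leaving gold tokens
# ['brown', 'dog'] vs pred tokens ['brown', 'dog', 'barked']; the 2 shared
# tokens give precision 2/3 and recall 2/2, so F1 = 0.8 while exact = 0.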
| 2,407 | 31.986301 | 82 |
py
|
daanet
|
daanet-master/utils/eval_4/exact_f1/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/utils/eval_4/rouge_metric/rouge.py
|
#!/usr/bin/env python
#
# File Name : rouge.py
#
# Description : Computes ROUGE-L metric as described by Lin and Hovy (2004)
#
# Creation Date : 2015-01-07 06:03
# Author : Ramakrishna Vedantam <[email protected]>
import numpy as np
def my_lcs(string, sub):
"""
Calculates longest common subsequence for a pair of tokenized strings
:param string : list of str : tokens from a string split using whitespace
:param sub : list of str : shorter string, also split using whitespace
:returns: length (list of int): length of the longest common subsequence between the two strings
Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
"""
if (len(string) < len(sub)):
sub, string = string, sub
lengths = [[0 for i in range(0, len(sub) + 1)] for j in range(0, len(string) + 1)]
for j in range(1, len(sub) + 1):
for i in range(1, len(string) + 1):
if (string[i - 1] == sub[j - 1]):
lengths[i][j] = lengths[i - 1][j - 1] + 1
else:
lengths[i][j] = max(lengths[i - 1][j], lengths[i][j - 1])
return lengths[len(string)][len(sub)]
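# Example (added): my_lcs('a b c d'.split(), 'a c d'.split()) -> 3, the length
# of the longest common subsequence 'a c d'; only the length is returned.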
class Rouge():
'''
Class for computing ROUGE-L score for a set of candidate sentences for the MS COCO test set
'''
def __init__(self):
        # vrama91: updated the value below based on discussion with Hovy
self.beta = 1.2
def calc_score(self, candidate, refs):
"""
Compute ROUGE-L score given one candidate and references for an image
:param candidate: str : candidate sentence to be evaluated
:param refs: list of str : COCO reference sentences for the particular image to be evaluated
:returns score: int (ROUGE-L score for the candidate evaluated against references)
"""
assert (len(candidate) == 1)
assert (len(refs) > 0)
prec = []
rec = []
# split into tokens
token_c = candidate[0].split(" ")
for reference in refs:
# split into tokens
token_r = reference.split(" ")
# compute the longest common subsequence
lcs = my_lcs(token_r, token_c)
prec.append(lcs / float(len(token_c)))
rec.append(lcs / float(len(token_r)))
prec_max = max(prec)
rec_max = max(rec)
if (prec_max != 0 and rec_max != 0):
score = ((1 + self.beta ** 2) * prec_max * rec_max) / float(rec_max + self.beta ** 2 * prec_max)
else:
score = 0.0
return score
def compute_score(self, gts, res):
"""
Computes Rouge-L score given a set of reference and candidate sentences for the dataset
Invoked by evaluate_captions.py
:param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
:param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values
:returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
"""
assert (gts.keys() == res.keys())
score = [self.calc_score(res[id], ref) for id, ref in gts.items()]
average_score = np.mean(np.array(score))
return average_score, np.array(score)
def method(self):
return "Rouge"
| 3,391 | 35.085106 | 123 |
py
|
daanet
|
daanet-master/utils/eval_4/rouge_metric/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/utils/eval_4/bleu_metric/bleu_scorer.py
|
#!/usr/bin/env python
# bleu_scorer.py
# David Chiang <[email protected]>
# Copyright (c) 2004-2006 University of Maryland. All rights
# reserved. Do not redistribute without permission from the
# author. Not for commercial use.
# Modified by:
# Hao Fang <[email protected]>
# Tsung-Yi Lin <[email protected]>
'''Provides:
cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
'''
import copy
import logging
import math
from collections import defaultdict
def precook(s, n=4, out=False):
"""Takes a string as input and returns an object that can be given to
either cook_refs or cook_test. This is optional: cook_refs and cook_test
can take string arguments as well."""
words = s.split()
counts = defaultdict(int)
for k in range(1, n + 1):
for i in range(len(words) - k + 1):
ngram = tuple(words[i:i + k])
counts[ngram] += 1
return (len(words), counts)
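# Example (added): precook('the cat the cat', n=2) returns
#   (4, {('the',): 2, ('cat',): 2, ('the', 'cat'): 2, ('cat', 'the'): 1})
# i.e. the token count plus every n-gram count up to length n.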
def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
reflen = []
maxcounts = {}
for ref in refs:
rl, counts = precook(ref, n)
reflen.append(rl)
for (ngram, count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram, 0), count)
# Calculate effective reference sentence length.
if eff == "shortest":
reflen = min(reflen)
elif eff == "average":
reflen = float(sum(reflen)) / len(reflen)
## lhuang: N.B.: leave reflen computaiton to the very end!!
## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design)
return (reflen, maxcounts)
def cook_test(test, cooked_refs, eff=None, n=4):
    '''Takes a test sentence (together with the output of cook_refs) and returns
    an object that encapsulates everything that BLEU needs to know about it.'''
    (reflen, refmaxcounts) = cooked_refs
testlen, counts = precook(test, n, True)
result = {}
# Calculate effective reference sentence length.
if eff == "closest":
result["reflen"] = min((abs(l - testlen), l) for l in reflen)[1]
else: ## i.e., "average" or "shortest" or None
result["reflen"] = reflen
result["testlen"] = testlen
result["guess"] = [max(0, testlen - k + 1) for k in range(1, n + 1)]
result['correct'] = [0] * n
for (ngram, count) in counts.items():
result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
return result
class BleuScorer(object):
"""Bleu scorer.
"""
__slots__ = "n", "crefs", "ctest", "_score", "_ratio", "_testlen", "_reflen", "special_reflen"
# special_reflen is used in oracle (proportional effective ref len for a node).
def copy(self):
''' copy the refs.'''
new = BleuScorer(n=self.n)
new.ctest = copy.copy(self.ctest)
new.crefs = copy.copy(self.crefs)
new._score = None
return new
def __init__(self, test=None, refs=None, n=4, special_reflen=None):
''' singular instance '''
self.n = n
self.crefs = []
self.ctest = []
self.cook_append(test, refs)
self.special_reflen = special_reflen
def cook_append(self, test, refs):
'''called by constructor and __iadd__ to avoid creating new instances.'''
if refs is not None:
self.crefs.append(cook_refs(refs))
if test is not None:
cooked_test = cook_test(test, self.crefs[-1])
self.ctest.append(cooked_test) ## N.B.: -1
else:
self.ctest.append(None) # lens of crefs and ctest have to match
self._score = None ## need to recompute
def ratio(self, option=None):
self.compute_score(option=option)
return self._ratio
def score_ratio(self, option=None):
'''return (bleu, len_ratio) pair'''
return (self.fscore(option=option), self.ratio(option=option))
def score_ratio_str(self, option=None):
return "%.4f (%.2f)" % self.score_ratio(option)
def reflen(self, option=None):
self.compute_score(option=option)
return self._reflen
def testlen(self, option=None):
self.compute_score(option=option)
return self._testlen
def retest(self, new_test):
if type(new_test) is str:
new_test = [new_test]
assert len(new_test) == len(self.crefs), new_test
self.ctest = []
for t, rs in zip(new_test, self.crefs):
self.ctest.append(cook_test(t, rs))
self._score = None
return self
def rescore(self, new_test):
''' replace test(s) with new test(s), and returns the new score.'''
return self.retest(new_test).compute_score()
def size(self):
assert len(self.crefs) == len(self.ctest), "refs/test mismatch! %d<>%d" % (len(self.crefs), len(self.ctest))
return len(self.crefs)
def __iadd__(self, other):
'''add an instance (e.g., from another sentence).'''
if type(other) is tuple:
## avoid creating new BleuScorer instances
self.cook_append(other[0], other[1])
else:
assert self.compatible(other), "incompatible BLEUs."
self.ctest.extend(other.ctest)
self.crefs.extend(other.crefs)
self._score = None ## need to recompute
return self
def compatible(self, other):
return isinstance(other, BleuScorer) and self.n == other.n
def single_reflen(self, option="average"):
return self._single_reflen(self.crefs[0][0], option)
def _single_reflen(self, reflens, option=None, testlen=None):
if option == "shortest":
reflen = min(reflens)
elif option == "average":
reflen = float(sum(reflens)) / len(reflens)
elif option == "closest":
reflen = min((abs(l - testlen), l) for l in reflens)[1]
else:
assert False, "unsupported reflen option %s" % option
return reflen
def recompute_score(self, option=None, verbose=0):
self._score = None
return self.compute_score(option, verbose)
def compute_score(self, option=None, verbose=0):
logger = logging.getLogger("brc")
n = self.n
small = 1e-9
tiny = 1e-15 ## so that if guess is 0 still return 0
bleu_list = [[] for _ in range(n)]
        if self._score is not None:
            return self._score, self._bleu_list
if option is None:
option = "average" if len(self.crefs) == 1 else "closest"
self._testlen = 0
self._reflen = 0
totalcomps = {'testlen': 0, 'reflen': 0, 'guess': [0] * n, 'correct': [0] * n}
# for each sentence
for comps in self.ctest:
testlen = comps['testlen']
self._testlen += testlen
if self.special_reflen is None: ## need computation
reflen = self._single_reflen(comps['reflen'], option, testlen)
else:
reflen = self.special_reflen
self._reflen += reflen
for key in ['guess', 'correct']:
for k in range(n):
totalcomps[key][k] += comps[key][k]
# append per image bleu score
bleu = 1.
for k in range(n):
bleu *= (float(comps['correct'][k]) + tiny) \
/ (float(comps['guess'][k]) + small)
bleu_list[k].append(bleu ** (1. / (k + 1)))
ratio = (testlen + tiny) / (reflen + small) ## N.B.: avoid zero division
if ratio < 1:
for k in range(n):
bleu_list[k][-1] *= math.exp(1 - 1 / ratio)
if verbose > 1:
logger.info(comps, reflen)
totalcomps['reflen'] = self._reflen
totalcomps['testlen'] = self._testlen
bleus = []
bleu = 1.
for k in range(n):
bleu *= float(totalcomps['correct'][k] + tiny) \
/ (totalcomps['guess'][k] + small)
bleus.append(bleu ** (1. / (k + 1)))
ratio = (self._testlen + tiny) / (self._reflen + small) ## N.B.: avoid zero division
if ratio < 1:
for k in range(n):
bleus[k] *= math.exp(1 - 1 / ratio)
if verbose > 0:
logger.info(totalcomps)
logger.info("ratio: {}".format(ratio))
        self._score = bleus
        self._ratio = ratio
        self._bleu_list = bleu_list
        return self._score, self._bleu_list
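if __name__ == "__main__":
    # Minimal self-check (an illustrative addition, not part of the original
    # file): score one hypothesis against two references.
    scorer = BleuScorer(test="the cat sat on the mat",
                        refs=["the cat is on the mat", "a cat sat on the mat"])
    bleus, per_sentence = scorer.compute_score(option="closest")
    print(bleus)  # [BLEU-1, BLEU-2, BLEU-3, BLEU-4]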
| 8,791 | 31.442804 | 150 |
py
|
daanet
|
daanet-master/utils/eval_4/bleu_metric/bleu.py
|
#!/usr/bin/env python
#
# File Name : bleu.py
#
# Description : Wrapper for BLEU scorer.
#
# Creation Date : 06-01-2015
# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT
# Authors : Hao Fang <[email protected]> and Tsung-Yi Lin <[email protected]>
from .bleu_scorer import BleuScorer
class Bleu:
def __init__(self, n=4):
        # by default, compute BLEU scores up to 4-grams
self._n = n
self._hypo_for_image = {}
self.ref_for_image = {}
def compute_score(self, gts, res):
# assert (list(gts.keys()) == list(res.keys()))
bleu_scorer = BleuScorer(n=self._n)
for id, ref in gts.items():
bleu_scorer += (res[id][0], ref)
# score, scores = bleu_scorer.compute_score(option='shortest')
score, scores = bleu_scorer.compute_score(option='closest', verbose=1)
# score, scores = bleu_scorer.compute_score(option='average', verbose=1)
# return (bleu, bleu_info)
return score, scores
def compute_score_single_answer(self, res, ref):
bleu_scorer = BleuScorer(n=self._n)
for m, n in zip(res, ref):
bleu_scorer += (m, [n])
score, scores = bleu_scorer.compute_score(option='closest', verbose=1)
return score, scores
def method(self):
return "Bleu"
| 1,297 | 27.844444 | 80 |
py
|
daanet
|
daanet-master/utils/eval_4/bleu_metric/__init__.py
| 0 | 0 | 0 |
py
|
|
daanet
|
daanet-master/model_utils/helper.py
|
import json
import logging
import os
import time
import tensorflow as tf
from gpu_env import APP_NAME, ModeKeys
def sample_element_from_var(all_var):
result = {}
for v in all_var:
try:
v_rank = len(v.get_shape())
v_ele1, v_ele2 = v, v
for j in range(v_rank):
v_ele1, v_ele2 = v_ele1[0], v_ele2[-1]
            result['sampled/1_%s' % v.name], result['sampled/2_%s' % v.name] = v_ele1, v_ele2
        except Exception:
pass
return result
def partial_restore(session, save_file):
reader = tf.train.NewCheckpointReader(save_file)
saved_shapes = reader.get_variable_to_shape_map()
var_names = sorted([(var.name, var.name.split(':')[0]) for var in tf.global_variables()
if var.name.split(':')[0] in saved_shapes])
restore_vars = []
name2var = dict(zip(map(lambda x: x.name.split(':')[0], tf.global_variables()), tf.global_variables()))
with tf.variable_scope('', reuse=True):
for var_name, saved_var_name in var_names:
curr_var = name2var[saved_var_name]
var_shape = curr_var.get_shape().as_list()
if var_shape == saved_shapes[saved_var_name]:
restore_vars.append(curr_var)
saver = tf.train.Saver(restore_vars)
saver.restore(session, save_file)
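# Usage sketch (illustrative): restore only the variables whose name and shape
# match the checkpoint, e.g. when fine-tuning a graph that adds new layers.
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       partial_restore(sess, '/path/to/model.ckpt')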
def mblock(scope_name, device_name=None, reuse=None):
def f2(f):
def f2_v(self, *args, **kwargs):
start_t = time.time()
if device_name:
with tf.device(device_name), tf.variable_scope(scope_name, reuse=reuse):
f(self, *args, **kwargs)
else:
with tf.variable_scope(scope_name, reuse=reuse):
f(self, *args, **kwargs)
            self.logger.info('%s is built in %.4f secs' % (scope_name, time.time() - start_t))
return f2_v
return f2
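# Usage sketch (illustrative): wrap a model-building method so it runs inside
# a named variable scope (optionally pinned to a device) and logs build time
# through self.logger.
#   class Model:
#       @mblock('encoder', device_name='/gpu:0')
#       def _build_encoder(self):
#           ...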
def get_filename(args, mode: ModeKeys):
return os.path.join(args.result_dir,
'-'.join(v for v in [args.model_id, mode.name, args.suffix_output] if
v.strip()) + '.json')
def write_dev_json(f, pred_answers):
with open(f, 'w', encoding='utf8') as fp:
for p in pred_answers:
fp.write(json.dumps(p, ensure_ascii=False) + '\n')
class LossCounter:
def __init__(self, task_names, log_interval, batch_size, tb_writer):
self._task_names = task_names
self._log_interval = log_interval
self._start_t = time.time()
self._num_step = 1
self._batch_size = batch_size
self._n_steps_loss = 0
self._n_batch_task_loss = {k: 0.0 for k in self._task_names}
self._reset_step_loss()
self._tb_writer = tb_writer
self._logger = logging.getLogger(APP_NAME)
self._last_metric = 0
def _reset_step_loss(self):
self._last_n_steps_loss = self._n_steps_loss / self._log_interval
self._n_steps_loss = 0
self._n_batch_task_loss = {k: 0.0 for k in self._task_names}
def record(self, fetches):
self._num_step += 1
self._n_steps_loss += fetches['loss']
for k, v in fetches['task_loss'].items():
self._n_batch_task_loss[k] += v
if self._trigger():
self.show_status()
self._reset_step_loss()
if self._tb_writer:
self._tb_writer.add_summary(fetches['merged_summary'], self._num_step)
def is_overfitted(self, metric):
if metric - self._last_metric < 1e-6:
return True
else:
self._last_metric = metric
return False
def _trigger(self):
return (self._log_interval > 0) and (self._num_step % self._log_interval == 0)
def show_status(self):
cur_loss = self._n_steps_loss / self._log_interval
self._logger.info('%-4d->%-4d L: %.3f -> %.3f %d/s %s' % (
self._num_step - self._log_interval + 1, self._num_step,
self._last_n_steps_loss, cur_loss,
round(self._num_step * self._batch_size / (time.time() - self._start_t)),
self._get_multitask_loss_str(self._n_batch_task_loss, normalizer=self._log_interval)
))
@staticmethod
def _get_multitask_loss_str(loss_dict, normalizer=1.0, show_key=True, show_value=True):
if show_key and not show_value:
to_str = lambda x, y: '%s' % x
elif show_key and show_value:
to_str = lambda x, y: '%s: %.3f' % (x, y)
elif not show_key and show_value:
to_str = lambda x, y: '%.3f' % y
else:
to_str = lambda x, y: ''
return ' '.join([to_str(k, v / normalizer) for k, v in loss_dict.items()])
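# Usage sketch (illustrative): record per-step losses and emit a rolling log
# line every `log_interval` steps.
#   counter = LossCounter(['qa', 'qg'], log_interval=100, batch_size=32,
#                         tb_writer=None)
#   counter.record({'loss': 1.23, 'task_loss': {'qa': 0.7, 'qg': 0.53},
#                   'merged_summary': None})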
| 4,728 | 35.099237 | 107 |
py
|
daanet
|
daanet-master/model_utils/__init__.py
| 0 | 0 | 0 |
py
|
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/test_tasnet.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from model.model import TSENet,TSENet_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
def read_wav(fname, return_rate=False):
'''
    Read a wav file using PyTorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
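# Usage sketch (illustrative; 'mixture.wav' is a placeholder path):
#   src, sr = read_wav('mixture.wav', return_rate=True)  # src: (L,) tensor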
class Separation():
def __init__(self, mix_path, s1_path, ref_path, inf_path, yaml_path, model, gpuid):
super(Separation, self).__init__()
self.mix = handle_scp(mix_path)
self.s1 = handle_scp(s1_path)
self.ref = handle_scp(ref_path)
self.clss, self.onsets, self.offsets = handle_scp_inf(inf_path)
self.key = list(self.mix.keys())
opt = parse(yaml_path)
net = TSENet(N=opt['TSENet']['N'],
B=opt['TSENet']['B'],
H=opt['TSENet']['H'],
P=opt['TSENet']['P'],
X=opt['TSENet']['X'],
R=opt['TSENet']['R'],
norm=opt['TSENet']['norm'],
num_spks=opt['TSENet']['num_spks'],
causal=opt['TSENet']['causal'],
cls_num=opt['TSENet']['class_num'],
nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
nFFT=opt['datasets']['audio_setting']['nFFT'],
fusion=opt['TSENet']['fusion'],
usingEmb=opt['TSENet']['usingEmb'],
usingTsd=opt['TSENet']['usingTsd'],
CNN10_settings=opt['TSENet']['CNN10_settings'],
fixCNN10=opt['TSENet']['fixCNN10'],
fixTSDNet=opt['TSENet']['fixTSDNet'],
pretrainedCNN10=opt['TSENet']['pretrainedCNN10'],
pretrainedTSDNet=opt['TSENet']['pretrainedTSDNet'],
threshold=opt['TSENet']['threshold'])
dicts = torch.load(model, map_location='cpu')
net.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.net=net.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.gpuid=tuple(gpuid)
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.audio_length = opt['datasets']['audio_setting']['audio_length']
self.cls_num = opt['TSENet']['class_num']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
def inference(self, file_path, max_num=8000):
with torch.no_grad():
for i in range(min(len(self.key), max_num)):
index = self.key[i]
s1_index = index.replace('.wav', '_lab.wav')
ref_index = index.replace('.wav', '_re.wav')
mix = read_wav(self.mix[index])
ref = read_wav(self.ref[ref_index])
s1 = read_wav(self.s1[s1_index])
cls = torch.zeros(self.cls_num)
cls[self.clss[index]] = 1.
cls_index = cls.argmax(0)
cls_index = cls_index.to(self.device)
onset = self.onsets[index]
offset = self.offsets[index]
max_frame = self.sr * self.audio_length // self.nFrameShift - 2
onset_frame = round(onset * (self.sr // self.nFrameShift - 1)) if round(
onset * (self.sr // self.nFrameShift - 1)) >= 0 else 0
offset_frame = round(offset * (self.sr // self.nFrameShift - 1)) if round(
offset * (self.sr // self.nFrameShift - 1)) < max_frame else max_frame
framelab = torch.zeros(max_frame + 1)
                for t in range(onset_frame, offset_frame + 1):
                    framelab[t] = 1.
framelab = framelab[None,:]
self.logger.info("Compute on utterance {}...".format(index))
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = s1.to(self.device)
framelab = framelab.to(self.device)
if mix.dim() == 1:
mix = torch.unsqueeze(mix, 0)
if ref.dim() == 1:
ref = torch.unsqueeze(ref, 0)
if s1.dim() == 1:
s1 = torch.unsqueeze(s1, 0)
#out, lps, lab, est_cls
#ests, lps, lab, est_cls = self.net(mix, ref,cls_index.long(), s1)
ests, lps, lab, est_cls = self.net(mix, ref, s1)
spks=[torch.squeeze(s.detach().cpu()) for s in ests]
a = 0
for s in spks:
s = s[:mix.shape[1]]
s = s.unsqueeze(0)
a += 1
os.makedirs(file_path+'/sound'+str(a), exist_ok=True)
filename=file_path+'/sound'+str(a)+'/'+index
write_wav(filename, s, 16000)
self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-mix_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix.scp', help='Path to mix scp file.')
parser.add_argument(
'-s1_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_s1.scp', help='Path to s1 scp file.')
parser.add_argument(
'-ref_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re.scp', help='Path to ref scp file.')
parser.add_argument(
'-inf_scp', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf.scp', help='Path to inf file.')
parser.add_argument(
'-yaml', type=str, default='./config/TSENet/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSE_exp/checkpoint_fsd2018_audio/TSENet_loss_one_hot_loss_7/best.pt', help="Path to model file.")
    parser.add_argument(
        '-max_num', type=int, default=20, help="Max number of testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSENet/result/TSENet/baseline', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation(args.mix_scp, args.s1_scp, args.ref_scp, args.inf_scp, args.yaml, args.model, gpuid)
separation.inference(args.save_path, args.max_num)
if __name__ == "__main__":
main()
| 7,559 | 48.090909 | 176 |
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/train_Tasnet.py
|
import sys
sys.path.append('./')
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset import Datasets
from model.model import TSENet,TSENet_one_hot
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Tasnet,trainer_Tasnet_one_hot
import torch.optim.lr_scheduler as lr_scheduler
def make_dataloader(opt):
# make training dataloader
train_dataset = Datasets(
opt['datasets']['train']['dataroot_mix'],
opt['datasets']['train']['dataroot_targets'][0], # s1
opt['datasets']['train']['dataroot_targets'][1], # ref
opt['datasets']['train']['dataroot_targets'][2], # time information
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'],
opt['datasets']['audio_setting']['nFrameShift'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets(
opt['datasets']['val']['dataroot_mix'],
opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1],
opt['datasets']['val']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'],
opt['datasets']['audio_setting']['nFrameShift'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make test dataloader
test_dataset = Datasets(
opt['datasets']['test']['dataroot_mix'],
opt['datasets']['test']['dataroot_targets'][0],
opt['datasets']['test']['dataroot_targets'][1],
opt['datasets']['test']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'],
opt['datasets']['audio_setting']['nFrameShift'])
test_dataloader = Loader(test_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
return train_dataloader, val_dataloader, test_dataloader
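# Example (illustrative; keys inferred from the lookups above, values are
# placeholders): the dataset part of the YAML is expected to look roughly like
#   datasets:
#     train: {dataroot_mix: ..., dataroot_targets: [..., ..., ...]}
#     dataloader_setting: {batch_size: 8, num_workers: 4, shuffle: true}
#     audio_setting: {sample_rate: 16000, class_num: 41, audio_length: 10,
#                     nFrameShift: 160}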
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
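# Example (illustrative): the optimizer section consumed here is expected to
# look roughly like
#   optim: {name: Adam, lr: 1.0e-3, weight_decay: 1.0e-5, momentum: 0.9}
# where 'momentum' is only read for non-Adam optimizers such as SGD.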
def train():
parser = argparse.ArgumentParser(description='Parameters for training TSENet')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of TSENet")
logger.info(opt['logger']['experimental_description'])
if opt['one_hot'] == 1:
net = TSENet_one_hot(N=opt['TSENet']['N'],
B=opt['TSENet']['B'],
H=opt['TSENet']['H'],
P=opt['TSENet']['P'],
X=opt['TSENet']['X'],
R=opt['TSENet']['R'],
norm=opt['TSENet']['norm'],
num_spks=opt['TSENet']['num_spks'],
causal=opt['TSENet']['causal'],
cls_num=opt['TSENet']['class_num'],
nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
nFFT=opt['datasets']['audio_setting']['nFFT'],
fusion=opt['TSENet']['fusion'],
usingEmb=opt['TSENet']['usingEmb'],
usingTsd=opt['TSENet']['usingTsd'],
CNN10_settings=opt['TSENet']['CNN10_settings'],
fixCNN10=opt['TSENet']['fixCNN10'],
fixTSDNet=opt['TSENet']['fixTSDNet'],
pretrainedCNN10=opt['TSENet']['pretrainedCNN10'],
pretrainedTSDNet=opt['TSENet']['pretrainedTSDNet'],
threshold=opt['TSENet']['threshold'])
else:
net = TSENet(N=opt['TSENet']['N'],
B=opt['TSENet']['B'],
H=opt['TSENet']['H'],
P=opt['TSENet']['P'],
X=opt['TSENet']['X'],
R=opt['TSENet']['R'],
norm=opt['TSENet']['norm'],
num_spks=opt['TSENet']['num_spks'],
causal=opt['TSENet']['causal'],
cls_num=opt['TSENet']['class_num'],
nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
nFFT=opt['datasets']['audio_setting']['nFFT'],
fusion=opt['TSENet']['fusion'],
usingEmb=opt['TSENet']['usingEmb'],
usingTsd=opt['TSENet']['usingTsd'],
CNN10_settings=opt['TSENet']['CNN10_settings'],
fixCNN10=opt['TSENet']['fixCNN10'],
fixTSDNet=opt['TSENet']['fixTSDNet'],
pretrainedCNN10=opt['TSENet']['pretrainedCNN10'],
pretrainedTSDNet=opt['TSENet']['pretrainedTSDNet'],
threshold=opt['TSENet']['threshold'])
# build optimizer
logger.info("Building the optimizer of TSENet")
optimizer = make_optimizer(net.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of TSENet')
train_dataloader, val_dataloader, test_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}, Test Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader), len(test_dataloader)))
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, opt['train']['epoch'])
# build trainer
logger.info('Building the Trainer of TSENet')
if opt['one_hot'] == 1:
trainer = trainer_Tasnet_one_hot.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
else:
trainer = trainer_Tasnet.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
trainer.run()
#trainer.only_test()
if __name__ == "__main__":
train()
| 7,435 | 48.245033 | 131 |
py
|