repo (string, 2–99 chars) | file (string, 13–225 chars) | code (string, 0–18.3M chars) | file_length (int64, 0–18.3M) | avg_line_length (float64, 0–1.36M) | max_line_length (int64, 0–4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
finmag
|
finmag-master/doc/tailored_tutorials/basics.py
|
conf = {'nameshort': 'basics',
'names': ('Finmag Users',),
'tutorials' : ['tutorial-using-ipython-notebook',
'tutorial-example2',
'tutorial-saving-averages-demo',
'tutorial-scheduling-events',
'tutorial-relaxations-of-a-single-nanodisk',
'tutorial-coupled-relaxation-of-two-nanodisks',
#'tutorial-running-simulations-with-dmi',
'tutorial-sampling-m-at-arbitrary-positions',
'ref-restarting-simulations',
'tutorial-use-of-logging',
'ref-using-timings',
]}
| 724 | 47.333333 | 70 |
py
|
finmag
|
finmag-master/doc/tailored_tutorials/2015-Hagen-Fuchs.py
|
conf = {'nameshort': 'HagenFuchs',
'names': ('Hagen Fuchs', 'Ulrich Roessler', 'Denys Makarov'),
'tutorials' : ['tutorial-using-ipython-notebook',
'tutorial-example2',
'tutorial-saving-averages-demo',
'tutorial-use-of-logging',
'tutorial-running-simulations-with-dmi',
'tutorial-sampling-m-at-arbitrary-positions',
'ref-restarting-simulations',
'tutorial-domain-wall-relaxation-example'
],
'creationdate': "22-12-2014"}
| 628 | 47.384615 | 69 |
py
|
finmag
|
finmag-master/doc/tailored_tutorials/hesjedahl.py
|
conf = {'nameshort': 'Hesjedahl',
'names': ('Thorsten Hesjedahl', 'Shilei Zhang'),
'tutorials' : ['tutorial-using-ipython-notebook',
'tutorial-example2',
'tutorial-saving-averages-demo',
'tutorial-use-of-logging',
'tutorial-running-simulations-with-dmi',
'tutorial-sampling-m-at-arbitrary-positions',
'ref-restarting-simulations'
]}
| 510 | 45.454545 | 68 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/test.py
|
########################################################
# This is an example of the training and test procedure.
# You need to adjust the training and test dataloaders based on your data.
# Copyright @ Xuesong Niu
########################################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os
import shutil
import sys
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import scipy.io as sio
import torchvision.models as models
from torch.optim.lr_scheduler import MultiStepLR
sys.path.append('..');
from utils.database.Pixelmap import PixelMap_fold_STmap
from utils.model.model_disentangle import HR_disentangle_cross;
from utils.loss.loss_cross import Cross_loss;
from utils.loss.loss_r import Neg_Pearson;
from utils.loss.loss_SNR import SNR_loss;
batch_size_num = 2;
epoch_num = 70;
learning_rate = 0.001;
test_batch_size = 5;
toTensor = transforms.ToTensor();
resize = transforms.Resize(size = (320,320));
#######################################################
lambda_hr = 1;
lambda_img = 0.0000025;
lambda_low_rank = 10;
lambda_ecg = 0.02;
lambda_snr = 1;
lambda_cross_fhr = 0.000005;
lambda_cross_fn = 0.000005;
lambda_cross_hr = 1;
video_length = 300;
########################################################################
### This is only a simple toy example dataloader (utils/database/Pixelmap.py).
### It does not include the cross-validation split or the training/test split.
### You need to adjust the dataloader for your own data.
### Parameters: root_dir: location of the MSTmaps
###             VerticalFlip: random vertical flip for data augmentation
########################################################################
train_dataset = PixelMap_fold_STmap(root_dir='./MSTmaps/',
Training = True, transform=transforms.Compose([resize, toTensor]), VerticalFlip = True,
video_length = video_length);
train_loader = DataLoader(train_dataset, batch_size=batch_size_num,
shuffle=True, num_workers=4);
test_dataset = PixelMap_fold_STmap(root_dir='./MSTmaps/',
Training = False, transform=transforms.Compose([resize, toTensor]), VerticalFlip = False,
video_length = video_length);
test_loader = DataLoader(test_dataset, batch_size=test_batch_size,
shuffle=False, num_workers=4);
#########################################################################
#########################################################################
#########################################################################
net = HR_disentangle_cross();
net.cuda();
#########################################################################
lossfunc_HR = nn.L1Loss();
lossfunc_img = nn.L1Loss();
lossfunc_cross = Cross_loss(lambda_cross_fhr = lambda_cross_fhr, lambda_cross_fn = lambda_cross_fn, lambda_cross_hr = lambda_cross_hr);
lossfunc_ecg = Neg_Pearson(downsample_mode = 0);
lossfunc_SNR = SNR_loss(clip_length = video_length, loss_type = 7);
optimizer = torch.optim.Adam([{'params': net.parameters(), 'lr': 0.0005}]);
def train():
net.train();
train_loss = 0;
for batch_idx, (data, bpm, fps, bvp, idx) in enumerate(train_loader):
data = Variable(data);
bvp = Variable(bvp);
bpm = Variable(bpm.view(-1,1));
fps = Variable(fps.view(-1,1));
data, bpm = data.cuda(), bpm.cuda();
fps = fps.cuda()
bvp = bvp.cuda()
print(bvp)
feat_hr, feat_n, output, img_out, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, ecg, ecg1, ecg2 = net(data);
loss_hr = lossfunc_HR(output, bpm)*lambda_hr;
loss_img = lossfunc_img(data, img_out)*lambda_img;
loss_ecg = lossfunc_ecg(ecg, bvp)*lambda_ecg;
print(loss_ecg)
        loss_SNR, tmp = lossfunc_SNR(ecg, bpm, fps, pred = output, flag = None);
        loss_SNR = loss_SNR * lambda_snr;  # scale after unpacking; multiplying the returned (loss, idx) tuple would repeat it, not scale it
loss = loss_hr + loss_ecg + loss_img + loss_SNR;
loss_cross, loss_hr1, loss_hr2, loss_fhr1, loss_fhr2, loss_fn1, loss_fn2, loss_hr_dis1, loss_hr_dis2 = lossfunc_cross(feat_hr, feat_n, output,
feat_hrf1, feat_nf1,
hrf1, idx1,
feat_hrf2, feat_nf2,
hrf2, idx2, bpm)
loss = loss + loss_cross;
train_loss += loss.item();
optimizer.zero_grad()
loss.backward()
optimizer.step();
print('Train epoch: {:.0f}, it: {:.0f}, loss: {:.4f}, loss_hr: {:.4f}, loss_img: {:.4f}, loss_cross: {:.4f}, loss_snr: {:.4f}'.format(epoch, batch_idx,
loss, loss_hr, loss_img, loss_cross, loss_SNR));
def test():
net.eval()
test_loss = 0;
for (data, hr, fps, bvp, idx) in test_loader:
data = Variable(data);
hr = Variable(hr.view(-1,1));
data, hr = data.cuda(), hr.cuda();
feat_hr, feat_n, output, img_out, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, ecg, ecg1, ecg2 = net(data);
loss = lossfunc_HR(output, hr);
test_loss += loss.item();
begin_epoch = 1;
scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.5)
for epoch in range(begin_epoch, epoch_num + 1):
if epoch > 20:
train_dataset.transform = transforms.Compose([resize, toTensor]);
train_dataset.VerticalFlip = False;
train_loader = DataLoader(train_dataset, batch_size=batch_size_num,
shuffle=True, num_workers=4);
train();
test();
| 6,206 | 39.835526 | 159 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/__init__.py
| 1 | 0 | 0 |
py
|
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/database/__init__.py
| 1 | 0 | 0 |
py
|
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/database/Pixelmap.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os
import shutil
import numpy as np
from torch.utils.data import Dataset, DataLoader
import scipy.io as sio
from PIL import Image
import torchvision.transforms.functional as transF
import random;
# from skimage import io, transform
class PixelMap_fold_STmap(Dataset):
def __init__(self, root_dir, Training=True, transform=None, VerticalFlip = False, video_length = 300):
self.train = Training;
self.root_dir = root_dir;
self.transform = transform;
self.video_length = video_length;
self.VerticalFlip = VerticalFlip;
    def __len__(self):
        # one sample per numbered sub-directory in root_dir
        return len(os.listdir(self.root_dir));
def __getitem__(self, idx):
dir_idx = idx + 1;
img_name1 = str(dir_idx) + '/img_rgb.png';
img_name2 = str(dir_idx) + '/img_yuv.png';
img_path1 = os.path.join(self.root_dir, img_name1);
img_path2 = os.path.join(self.root_dir, img_name2);
feature_map1 = Image.open(img_path1).convert('RGB');
feature_map2 = Image.open(img_path2).convert('RGB');
if self.VerticalFlip:
if random.random() < 0.5:
feature_map1 = transF.vflip(feature_map1);
feature_map2 = transF.vflip(feature_map2);
if self.transform:
feature_map1 = self.transform(feature_map1)
feature_map2 = self.transform(feature_map2)
feature_map = torch.cat((feature_map1, feature_map2), dim = 0);
bpm_path = self.root_dir + str(dir_idx) + '/bpm.mat';
bpm = sio.loadmat(bpm_path)['bpm'];
bpm = bpm.astype('float32');
fps_path = self.root_dir + str(dir_idx) + '/fps.mat';
fps = sio.loadmat(fps_path)['fps'];
fps = fps.astype('float32');
bvp_path = self.root_dir + str(dir_idx) + '/bvp.mat';
bvp = sio.loadmat(bvp_path)['bvp'];
bvp = bvp.astype('float32');
bvp = bvp[0];
return (feature_map, bpm, fps, bvp, idx);
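# Expected on-disk layout, inferred from the paths built above (not documented here):
#     root_dir/1/img_rgb.png, img_yuv.png, bpm.mat, fps.mat, bvp.mat
#     root_dir/2/...   (one numbered sub-directory per sample)
# Minimal smoke test under that assumption, with a hypothetical './MSTmaps/' folder:
if __name__ == '__main__':
    from torchvision import transforms
    dataset = PixelMap_fold_STmap(root_dir='./MSTmaps/', Training=False,
                                  transform=transforms.Compose([transforms.Resize((320, 320)),
                                                                transforms.ToTensor()]));
    feature_map, bpm, fps, bvp, idx = dataset[0];
    print(feature_map.shape);  # torch.Size([6, 320, 320]): RGB and YUV maps concatenated on dim 0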
| 2,154 | 30.231884 | 106 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/loss/loss_cross.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable, Function
import os
import shutil
import numpy as np
import scipy.io as sio
from scipy.stats import norm
class Cross_loss(nn.Module):
def __init__(self, lambda_cross_fhr = 0.000005, lambda_cross_fn = 0.000005, lambda_cross_hr = 1):
super(Cross_loss, self).__init__()
self.lossfunc_HR = nn.L1Loss();
self.lossfunc_feat = nn.L1Loss();
self.lambda_fhr = lambda_cross_fhr;
self.lambda_fn = lambda_cross_fn;
self.lambda_hr = lambda_cross_hr;
def forward(self, feat_hr, feat_n, hr, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, gt):
loss_hr1 = self.lossfunc_HR(hrf1, gt[idx1, :]);
loss_hr2 = self.lossfunc_HR(hrf2, gt[idx2, :]);
loss_fhr1 = self.lossfunc_feat(feat_hrf1, feat_hr[idx1, :, :, :]);
loss_fhr2 = self.lossfunc_feat(feat_hrf2, feat_hr[idx2, :, :, :]);
loss_fn1 = self.lossfunc_feat(feat_nf1, feat_n[idx1, :, :, :]);
loss_fn2 = self.lossfunc_feat(feat_nf2, feat_n[idx2, :, :, :]);
loss_hr_dis1 = self.lossfunc_HR(hrf1, hr[idx1, :]);
loss_hr_dis2 = self.lossfunc_HR(hrf2, hr[idx2, :]);
loss = self.lambda_hr * (loss_hr1 + loss_hr2) / 2 + self.lambda_fhr * (loss_fhr1 + loss_fhr2) / 2 + self.lambda_fn * (loss_fn1 + loss_fn2) / 2;
return loss, loss_hr1, loss_hr2, loss_fhr1, loss_fhr2, loss_fn1, loss_fn2, loss_hr_dis1, loss_hr_dis2
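# Wiring sketch (mirrors the call in test.py; names come from HR_disentangle_cross):
# the network swaps HR and noise features across two random index permutations
# idx1/idx2 of the batch, re-encodes the swapped images, and this loss ties the
# re-encoded HR predictions and features back to the originals:
#     criterion = Cross_loss()
#     loss, loss_hr1, loss_hr2, loss_fhr1, loss_fhr2, loss_fn1, loss_fn2, dis1, dis2 = \
#         criterion(feat_hr, feat_n, hr, feat_hrf1, feat_nf1, hrf1, idx1,
#                   feat_hrf2, feat_nf2, hrf2, idx2, gt)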
| 1,533 | 36.414634 | 151 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/loss/__init__.py
| 1 | 0 | 0 |
py
|
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/loss/loss_SNR.py
|
import math
import torch
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
import torch.nn as nn
class SNR_loss(nn.Module):
def __init__(self, clip_length = 300, delta = 3, loss_type = 1, use_wave = False):
super(SNR_loss, self).__init__()
self.clip_length = clip_length;
self.time_length = 300;
self.delta = delta;
self.delta_distribution = [0.4, 0.25, 0.05];
self.low_bound = 40;
self.high_bound = 150;
self.bpm_range = torch.arange(self.low_bound, self.high_bound, dtype = torch.float).cuda()
self.bpm_range = self.bpm_range / 60.0;
self.pi = 3.14159265;
two_pi_n = Variable(2 * self.pi * torch.arange(0, self.time_length, dtype = torch.float))
hanning = Variable(torch.from_numpy(np.hanning(self.time_length)).type(torch.FloatTensor), requires_grad=True).view(1, -1)
self.two_pi_n = two_pi_n.cuda();
self.hanning = hanning.cuda();
self.cross_entropy = nn.CrossEntropyLoss();
self.nll = nn.NLLLoss();
self.l1 = nn.L1Loss();
self.loss_type = loss_type;
self.eps = 0.0001;
self.lambda_l1 = 0.1;
self.use_wave = use_wave;
def forward(self, wave, gt, fps, pred = None, flag = None): # all variable operation
if flag is not None:
idx = flag.eq(1);
wave = wave[idx,:];
gt = gt[idx,:];
fps = fps[idx,:];
pred = pred[idx,:];
if(gt.shape[0] == 0):
loss = 0.0;
return loss, 0;
hr = torch.mul(gt, fps);
hr = hr*60/self.clip_length;
hr[hr.ge(self.high_bound)] = self.high_bound-1;
hr[hr.le(self.low_bound)] = self.low_bound;
if pred is not None:
pred = torch.mul(pred, fps);
pred = pred * 60 / self.clip_length;
batch_size = wave.shape[0];
f_t = self.bpm_range / fps;
preds = wave * self.hanning;
preds = preds.view(batch_size, 1, -1);
f_t = f_t.view(batch_size, -1, 1);
tmp = self.two_pi_n.repeat(batch_size, 1);
tmp = tmp.view(batch_size, 1, -1)
complex_absolute = torch.sum(preds * torch.sin(f_t*tmp), dim=-1) ** 2 \
+ torch.sum(preds * torch.cos(f_t*tmp), dim=-1) ** 2
target = hr - self.low_bound;
target = target.type(torch.long).view(batch_size);
whole_max_val, whole_max_idx = complex_absolute.max(1)
whole_max_idx = whole_max_idx + self.low_bound;
if self.loss_type == 1:
loss = self.cross_entropy(complex_absolute, target);
elif self.loss_type == 7:
norm_t = (torch.ones(batch_size).cuda() / torch.sum(complex_absolute, dim = 1));
norm_t = norm_t.view(-1,1);
complex_absolute = complex_absolute * norm_t;
loss = self.cross_entropy(complex_absolute, target);
idx_l = target - self.delta;
idx_l[idx_l.le(0)] = 0;
idx_r = target + self.delta;
idx_r[idx_r.ge(self.high_bound - self.low_bound - 1)] = self.high_bound - self.low_bound - 1;
loss_snr = 0.0;
for i in range(0, batch_size):
loss_snr = loss_snr + 1 - torch.sum(complex_absolute[i, idx_l[i]:idx_r[i]]);
loss_snr = loss_snr / batch_size;
loss = loss + loss_snr;
return loss, whole_max_idx
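# Note on the math above: "complex_absolute" is a power spectrum evaluated only at the
# candidate heart-rate frequencies. With the Hann-windowed wave w_n and a candidate
# frequency f in Hz (bpm_range / 60), the code computes
#     A(f) = (sum_n w_n * sin(2*pi*f*n / fps))^2 + (sum_n w_n * cos(2*pi*f*n / fps))^2,
# i.e. the squared DFT magnitude at f. The high_bound - low_bound = 110 candidate bins
# act as class logits and the ground-truth HR bin as the label, turning HR estimation
# into cross-entropy. loss_type 7 additionally normalizes the spectrum to sum to one
# and penalizes the power falling outside +/- delta bins of the target (the SNR term).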
| 3,482 | 31.858491 | 130 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/loss/loss_r.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable, Function
import os
import shutil
import numpy as np
import scipy.io as sio
from scipy.stats import norm
class Neg_Pearson(nn.Module):  # Pearson r lies in [-1, 1]; the loss below is 1 - r per sample, averaged over the batch
def __init__(self, downsample_mode = 0):
super(Neg_Pearson, self).__init__()
self.downsample_mode = downsample_mode;
return
def forward(self, preds, labels): # all variable operation
loss = 0.0
for i in range(preds.shape[0]):
a = preds[i,:];
b = labels[i,:];
if self.downsample_mode == 1:
b = b[0::2]
sum_x = torch.sum(a) # x
sum_y = torch.sum(b) # y
sum_xy = torch.sum(torch.mul(a, b)) # xy
sum_x2 = torch.sum(torch.mul(a, a)) # x^2
sum_y2 = torch.sum(torch.mul(b, b)) # y^2
N = preds.shape[1]
pearson = (N * sum_xy - sum_x * sum_y)/(torch.sqrt((N*sum_x2-sum_x*sum_x)*(N*sum_y2-sum_y*sum_y)))
loss += 1 - pearson
if not preds.shape[0] == 0:
loss = loss / preds.shape[0]
return loss
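# Per sample this is the textbook Pearson correlation,
#     r = (N*sum(xy) - sum(x)*sum(y)) / sqrt((N*sum(x^2) - sum(x)^2) * (N*sum(y^2) - sum(y)^2)),
# and the loss averages 1 - r over the batch, so it lives in [0, 2] with 0 for a
# perfectly correlated prediction. Quick sanity check (hypothetical shapes, batch x time):
#     crit = Neg_Pearson()
#     x = torch.randn(2, 300)
#     assert crit(x, x).item() < 1e-4  # identical signals -> r = 1 -> loss ~ 0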
| 1,249 | 29.487805 | 110 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/model/resnet.py
|
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from torch.autograd import Variable
import torch
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(ave_size, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.num_output = num_output
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
conv1 = self.maxpool(x)
conv2 = self.layer1(conv1)
conv3 = self.layer2(conv2) # B*128*28*28
conv4 = self.layer3(conv3) # B*256*14*14
conv5 = self.layer4(conv4) # B*512*7*7
x = self.avgpool(conv5)
feat = x.view(x.size(0), -1)
x = self.fc(feat)
if self.num_output == 34:
return x, conv3, conv4
else:
return x;
class ResNet_layer4(nn.Module):
def __init__(self, block, layers, num_classes=1000, ave_size=7):
self.inplanes = 256
super(ResNet_layer4, self).__init__()
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(ave_size, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
conv5 = self.layer4(x) # B*512*7*7
x = self.avgpool(conv5)
feat = x.view(x.size(0), -1)
x = self.fc(feat)
return x
class ResNet_layer34(nn.Module):
def __init__(self, block, layers, num_classes=1000, ave_size=7):
self.inplanes = 128
super(ResNet_layer34, self).__init__()
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(ave_size, stride=1)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
conv4 = self.layer3(x) # B*256*14*14
conv5 = self.layer4(conv4) # B*512*7*7
x = self.avgpool(conv5)
feat = x.view(x.size(0), -1)
x = self.fc(feat)
return x, feat
def resnet18(pretrained=False, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False, **kwargs):
    """Constructs a ResNet-50 model.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    return model
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
class ResNet_part(nn.Module):
def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
self.inplanes = 64
super(ResNet_part, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
conv1 = self.maxpool(x)
conv2 = self.layer1(conv1)
conv3 = self.layer2(conv2)
return conv3
def resnet18_part(**kwargs):
model = ResNet_part(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
class ResNet_part1(nn.Module):
def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
self.inplanes = 64
super(ResNet_part1, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
conv1 = self.maxpool(x)
conv2 = self.layer1(conv1)
return conv2
def resnet18_part1(**kwargs):
model = ResNet_part1(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34_part(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_part(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
ckp_path = '../model/pretrain/step_390000.model'
checkpoint = torch.load(ckp_path)
pretrained_dict = checkpoint['net_state_dict'];
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
class ResNet_part_cov3(nn.Module):
def __init__(self, block, layers, num_classes=1000, ave_size=7, num_output = 1):
self.inplanes = 64
super(ResNet_part_cov3, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
# print(self.inplanes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
conv1 = self.maxpool(x)
conv2 = self.layer1(conv1)
conv3 = self.layer2(conv2)
return conv3
def resnet34_part_cov3(pretrained=False, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet_part_cov3(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
ckp_path = '../model/pretrain/step_390000.model'
checkpoint = torch.load(ckp_path)
pretrained_dict = checkpoint['net_state_dict'];
model_dict = model.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
| 16,942 | 33.577551 | 87 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/model/model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os, sys
import shutil
import numpy as np
import scipy.io as sio
sys.path.append('..');
from utils.model.resnet import resnet18, resnet_small;
from utils.model.resnet_stconv import resnet18_stconv;
import time
| 353 | 16.7 | 54 |
py
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/model/__init__.py
| 1 | 0 | 0 |
py
|
|
CVD-Physiological-Measurement
|
CVD-Physiological-Measurement-master/utils/model/model_disentangle.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import os, sys
import shutil
import numpy as np
import scipy.io as sio
sys.path.append('..');
from utils.model.resnet import resnet18, resnet18_part;
import time
class ResidualBlock(nn.Module):
"""Residual Block."""
def __init__(self, dim_in, dim_out):
super(ResidualBlock, self).__init__()
self.main = nn.Sequential(
nn.Conv2d(dim_in, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True),
nn.ReLU(inplace=True),
nn.Conv2d(dim_out, dim_out, kernel_size=3, stride=1, padding=1, bias=False),
nn.InstanceNorm2d(dim_out, affine=True))
def forward(self, x):
return x + self.main(x)
class Generator(nn.Module):
def __init__(self, conv_dim=64, repeat_num=2, img_mode = 3, up_time = 3):
super(Generator, self).__init__()
curr_dim = conv_dim;
# Bottleneck
layers = []
for i in range(repeat_num):
layers.append(ResidualBlock(dim_in=curr_dim, dim_out=curr_dim))
# Up-Sampling
for i in range(up_time):
layers.append(nn.ConvTranspose2d(curr_dim, curr_dim//2, kernel_size=3, stride=2, padding=1, output_padding = 1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim//2, affine=True))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim // 2
self.main = nn.Sequential(*layers)
layers = []
if img_mode == 3:
layers.append(nn.Conv2d(curr_dim, 6, kernel_size=7, stride=1, padding=3, bias=False))
elif img_mode == 1:
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
elif img_mode == 4:
layers.append(nn.Conv2d(curr_dim, 9, kernel_size=7, stride=1, padding=3, bias=False))
elif img_mode == 0:
layers.append(nn.Conv2d(curr_dim, 3, kernel_size=7, stride=1, padding=3, bias=False))
layers.append(nn.Tanh())
self.img_reg = nn.Sequential(*layers)
def forward(self, x):
features = self.main(x)
x = self.img_reg(features);
return x
class HR_estimator_multi_task_STmap(nn.Module):
def __init__(self, video_length = 300):
super(HR_estimator_multi_task_STmap, self).__init__()
self.extractor = resnet18(pretrained=False, num_classes=1, num_output=34);
self.extractor.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.extractor.conv1 = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.feature_pool = nn.AdaptiveAvgPool2d((1, 10));
self.upsample1 = nn.Sequential(
nn.ConvTranspose2d(in_channels=256, out_channels=64, kernel_size=[1, 3], stride=[1, 3],
padding=[0, 0]), # [1, 128, 32]
nn.BatchNorm2d(64),
nn.ELU(),
)
self.upsample2 = nn.Sequential(
nn.ConvTranspose2d(in_channels=64, out_channels=32, kernel_size=[1, 5], stride=[1, 5],
padding=[0, 0]), # [1, 128, 32]
nn.BatchNorm2d(32),
nn.ELU(),
)
self.video_length = video_length;
self.poolspa = nn.AdaptiveAvgPool2d((1, int(self.video_length)))
self.ecg_conv = nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0)
def forward(self, x):
hr, feat_out, feat = self.extractor(x);
x = self.feature_pool(feat);
x = self.upsample1(x);
x = self.upsample2(x);
x = self.poolspa(x);
x = self.ecg_conv(x)
ecg = x.view(-1, int(self.video_length));
return hr, ecg, feat_out;
class HR_disentangle(nn.Module):
def __init__(self, video_length = 300, decov_num = 1):
super(HR_disentangle, self).__init__()
self.extractor = HR_estimator_multi_task_STmap();
self.Noise_encoder = resnet18_part()
self.Noise_encoder.conv1 = nn.Conv2d(6, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.decoder = Generator(conv_dim=128, repeat_num=decov_num, img_mode = 3)
self.video_length = video_length;
self.poolspa = nn.AdaptiveAvgPool2d((1, int(self.video_length/2)))
self.ecg_conv = nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0)
def forward(self, img):
hr, ecg, feat_hr = self.extractor(img);
feat_n = self.Noise_encoder(img);
feat = feat_hr + feat_n;
img = self.decoder(feat);
return feat_hr, feat_n, hr, img, ecg
class HR_disentangle_cross(nn.Module):
def __init__(self, video_length = 300):
super(HR_disentangle_cross, self).__init__()
self.encoder_decoder = HR_disentangle(decov_num = 1);
self.video_length = video_length;
self.poolspa = nn.AdaptiveAvgPool2d((1, int(self.video_length)))
self.ecg_conv = nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0)
def forward(self, img):
batch_size = img.size(0);
feat_hr, feat_n, hr, img_out, ecg = self.encoder_decoder(img);
idx1 = torch.randint(batch_size, (batch_size,))
idx2 = torch.randint(batch_size, (batch_size,))
idx1 = idx1.long();
idx2 = idx2.long();
feat_hr1 = feat_hr[idx1, :, :, :];
feat_hr2 = feat_hr[idx2, :, :, :];
feat_n1 = feat_n[idx1, :, :, :];
feat_n2 = feat_n[idx2, :, :, :];
featf1 = feat_hr1 + feat_n2;
featf2 = feat_hr2 + feat_n1;
imgf1 = self.encoder_decoder.decoder(featf1);
imgf2 = self.encoder_decoder.decoder(featf2);
feat_hrf1, feat_nf2, hrf1, img_outf1, ecg1 = self.encoder_decoder(imgf1);
feat_hrf2, feat_nf1, hrf2, img_outf2, ecg2 = self.encoder_decoder(imgf2);
return feat_hr, feat_n, hr, img_out, feat_hrf1, feat_nf1, hrf1, idx1, feat_hrf2, feat_nf2, hrf2, idx2, ecg, ecg1, ecg2
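# Forward-pass shape sketch (assumptions: 6-channel 320x320 MSTmaps as produced by the
# Pixelmap dataset, video_length = 300; illustrative only, requires a CUDA device):
#     net = HR_disentangle_cross().cuda()
#     x = torch.randn(2, 6, 320, 320).cuda()
#     out = net(x)   # 15-tuple; out[2] is the HR estimate (2, 1), out[12] the ECG wave (2, 300)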
| 5,936 | 34.76506 | 136 |
py
|
CREPE
|
CREPE-master/crepe_prod_eval_cyclip.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ast
import argparse
import logging
import os
from PIL import Image, ImageFile
from dataclasses import dataclass
from time import time
import json
import torch
import torchvision.transforms.functional as TF
from pkgs.openai.clip import load
from torch import nn
from torch.utils.data import DataLoader, Dataset
import numpy as np
import pandas as pd
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def collator(batch):
    images = torch.stack([x[0] for x in batch], dim=0)
    texts = torch.cat([x[1] for x in batch], dim=0)
    attention_masks = torch.cat([x[2] for x in batch], dim=0)
    return images, texts, attention_masks
### DATASET CONSTRUCTION
class CsvDataset(BaseCsvDataset):
def __init__(self, input_filename, args, processor):
super().__init__(input_filename, args)
self.processor = processor
def __getitem__(self, idx):
raw_image = self.get_image_by_id(self.images[idx])
if self.crop:
raw_image = TF.crop(raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx])
image = torch.tensor(self.processor.process_image(raw_image))
return_dict = self.processor.process_text([str(self.captions[idx])] + list(self.hard_negs[idx]))
input_ids = return_dict['input_ids']
attention_mask = return_dict['attention_mask']
return image, input_ids, attention_mask
def get_data(args, retrieval_data_path, processor):
# Get CSVDataset
input_filename = retrieval_data_path
dataset = CsvDataset(
input_filename,
args,
processor)
num_samples = len(dataset)
sampler = None
shuffle=False
dataloader = DataLoader(
dataset,
batch_size=16,
shuffle=shuffle,
num_workers=1,
pin_memory=True,
sampler=sampler,
drop_last=False,
collate_fn=collator
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader)
### EVALUATION
def evaluate(model, data, complexity, negative_type):
metrics = {}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataloader = data.dataloader
# num_samples = 0
# samples_per_val = dataloader.num_samples
# cumulative_loss = 0.0
# all_image_features, all_text_features = [], []
one2many = dataloader.dataset.one2many
if one2many:
all_ranks = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
images, texts, attention_mask = batch
images = images.to(device=device, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
attention_mask = attention_mask.to(device=device, non_blocking=True)
if one2many:
image_emb = model.get_image_features(images)
image_emb /= image_emb.norm(dim = -1, keepdim = True)
text_emb = model.get_text_features(input_ids = texts, attention_mask = attention_mask)
text_emb /= text_emb.norm(dim = -1, keepdim = True)
set_size = text_emb.shape[0] // image_emb.shape[0]
for j in range(image_emb.shape[0]):
curr_image_emb = image_emb[j:j+1, :]
curr_text_emb = text_emb[j*set_size:(j+1)*set_size, :]
rank = get_one2many_rank(curr_image_emb, curr_text_emb)
all_ranks.append(rank)
print(f'Processed example {i*16}')
metrics = get_one2many_metrics(np.array(all_ranks))
# Alter output here
logging.info(
"\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
)
return metrics
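# get_one2many_rank (imported from crepe_eval_utils) is assumed to rank the true
# caption (index 0) against its hard negatives by similarity to the image; a sketch
# of that contract, not the repo's actual implementation:
#     def get_one2many_rank(image_emb, text_embs):
#         sims = (image_emb @ text_embs.T).squeeze(0)  # both inputs are L2-normalized above
#         return int((sims > sims[0]).sum())           # 0 means the true caption ranked first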
def main():
args = setup_args()
if args.output_dir:
output_dir = os.path.join(args.output_dir, 'cyclip')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Load the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model, processor = load(name = args.model_name, pretrained = args.pretrained)
checkpoint = torch.load('best.pt', map_location=device)
state_dict = checkpoint['state_dict']
if(next(iter(state_dict.items()))[0].startswith("module")):
state_dict = {key[len("module."):]: value for key, value in state_dict.items()}
model.load_state_dict(state_dict)
model = model.to(device)
model.eval()
for hard_neg_type in args.hard_neg_types:
all_metrics = {}
# Iterate over each complexity
for i in range(4, 13):
print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
start_time = time()
retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
data = get_data(args, retrieval_data_path, processor)
metrics = evaluate(model, data, i, hard_neg_type)
print(f'Complexity {i} took {time() - start_time} seconds')
all_metrics[i] = metrics
if args.output_dir:
output = os.path.join(output_dir, f'productivity_cyclip_{args.model_name}_{hard_neg_type}_metrics.json')
print("saving results to:", output)
with open(output, 'w') as f:
json.dump(all_metrics, f)
if __name__ == "__main__":
main()
| 5,815 | 32.045455 | 135 |
py
|
CREPE
|
CREPE-master/crepe_prod_eval_albef.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
from PIL import Image
from time import time
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.nn.functional as F
import torchvision.transforms.functional as TF
import numpy as np
import json
# ALBEF:
# from torchmultimodal.transforms.flava_transform import FLAVAImageTransform
import ruamel.yaml as yaml
from models.model_retrieval import ALBEF
from models.vit import interpolate_pos_embed
# from transformers import BertTokenizer
from models.tokenization_bert import BertTokenizer
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
import pandas as pd
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
max_text_length = 512
TEXT_DEFAULT_TOKENIZER = "bert-base-uncased"
text_tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
def collator(batch):
images = torch.stack([x[0] for x in batch], dim=0)
texts = torch.cat([x[1] for x in batch], dim=0)
masks = torch.cat([x[2] for x in batch], dim=0)
return images, texts, masks
### DATASET CONSTRUCTION
def default_text_transform(texts):
# Expect a list of texts
tokenized_texts = []
attention_masks = []
start_time = time()
for text in texts:
tokenized = text_tokenizer(text, padding="max_length",
max_length=max_text_length, truncation=True, return_tensors='pt')
tokenized_texts.append(tokenized['input_ids'])
attention_masks.append(tokenized['attention_mask'])
tokenized_texts = torch.cat(tokenized_texts, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
return tokenized_texts, attention_masks
class CsvDataset(BaseCsvDataset):
def __init__(self, input_filename, args, config):
super().__init__(input_filename, args)
# albef transform:
normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
test_transform = transforms.Compose([
transforms.Resize((config['image_res'],config['image_res']),interpolation=Image.BICUBIC),
transforms.ToTensor(),
normalize,
])
self.image_transform = test_transform
self.text_transform = default_text_transform
def __getitem__(self, idx):
raw_image = self.get_image_by_id(self.images[idx])
if self.crop:
raw_image = TF.crop(raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx])
        image = self.image_transform(raw_image)
texts, attn_mask = self.text_transform([str(self.captions[idx])] + list(self.hard_negs[idx]))
return image, texts, attn_mask
def get_data(args, retrieval_data_path, config):
# Get CSVDataset
input_filename = retrieval_data_path
dataset = CsvDataset(
input_filename,
args,
config=config)
num_samples = len(dataset)
sampler = None
shuffle=False
dataloader = DataLoader(
dataset,
batch_size=16,
shuffle=shuffle,
num_workers=1,
pin_memory=True,
sampler=sampler,
drop_last=False,
collate_fn=collator
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader)
### EVALUATION
def evaluate(model, data, complexity, negative_type):
metrics = {}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataloader = data.dataloader
# num_samples = 0
# samples_per_val = dataloader.num_samples
# cumulative_loss = 0.0
# all_image_features, all_text_features = [], []
one2many = dataloader.dataset.one2many
    assert one2many, "Not one2many?"  # asserting the tuple (one2many, msg) is always truthy
if one2many:
all_ranks = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
images, texts, masks = batch
images = images.to(device=device, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
masks = masks.to(device=device, non_blocking=True)
if one2many:
image_feat = model.visual_encoder(images)
image_embed = model.vision_proj(image_feat[:,0,:])
image_embed = F.normalize(image_embed,dim=-1)
text_out = model.text_encoder(texts, attention_mask = masks, mode='text')
text_feat = text_out.last_hidden_state
text_emb = F.normalize(model.text_proj(text_feat[:,0,:]))
set_size = text_emb.shape[0] // image_embed.shape[0]
for j in range(image_embed.shape[0]):
curr_image_emb = image_embed[j:j+1, :]
curr_text_emb = text_emb[j*set_size:(j+1)*set_size, :]
rank = get_one2many_rank(curr_image_emb, curr_text_emb)
all_ranks.append(rank)
print(f'Processed example {i*16}')
metrics = get_one2many_metrics(np.array(all_ranks))
# Alter output here
logging.info(
"\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
)
return metrics
def main():
args = setup_args()
if args.output_dir:
output_dir = os.path.join(args.output_dir, 'albef')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# LOAD ALBEF
config_str = './configs/Retrieval_coco.yaml'
config = yaml.load(open(config_str, 'r'), Loader=yaml.Loader)
tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
albef = ALBEF(config=config, text_encoder=TEXT_DEFAULT_TOKENIZER, tokenizer=tokenizer)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info(f"Using device: {device}")
# MODEL CHECKPOINT
checkpoint = torch.load('./ALBEF.pth', map_location='cpu')
state_dict = checkpoint['model']
    # reshape positional embedding to accommodate the image resolution change
pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder.pos_embed'],albef.visual_encoder)
state_dict['visual_encoder.pos_embed'] = pos_embed_reshaped
m_pos_embed_reshaped = interpolate_pos_embed(state_dict['visual_encoder_m.pos_embed'],albef.visual_encoder_m)
state_dict['visual_encoder_m.pos_embed'] = m_pos_embed_reshaped
for key in list(state_dict.keys()):
if 'bert' in key:
encoder_key = key.replace('bert.','')
state_dict[encoder_key] = state_dict[key]
del state_dict[key]
msg = albef.load_state_dict(state_dict,strict=False)
albef = albef.to(device)
albef.eval()
for hard_neg_type in args.hard_neg_types:
all_metrics = {}
# Iterate over each complexity
for i in range(4, 13):
print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
start_time = time()
retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
data = get_data(args, retrieval_data_path, config)
metrics = evaluate(albef, data, i, hard_neg_type)
print(f'Complexity {i} took {time() - start_time} seconds')
all_metrics[i] = metrics
if args.output_dir:
output = os.path.join(output_dir, f'productivity_albef_{hard_neg_type}_metrics.json')
print("saving results to:", output)
with open(output, 'w') as f:
json.dump(all_metrics, f)
if __name__ == "__main__":
main()
| 7,966 | 34.887387 | 135 |
py
|
CREPE
|
CREPE-master/crepe_prod_eval_flava.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import ast
import logging
import os
from PIL import Image
from dataclasses import dataclass
from time import time
import json
import torch
from torchmultimodal.transforms.flava_transform import FLAVAImageTransform
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torchmultimodal.models.flava.model import flava_model
from transformers import BertTokenizer
import torchvision.transforms.functional as TF
import numpy as np
import pandas as pd
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
max_text_length = 512
TEXT_DEFAULT_TOKENIZER = "bert-base-uncased"
text_tokenizer = BertTokenizer.from_pretrained(TEXT_DEFAULT_TOKENIZER)
def collator(batch):
    images = torch.stack([x[0]["image"] for x in batch], dim=0)
    texts = torch.cat([x[1] for x in batch], dim=0)
    return images, texts
### DATASET CONSTRUCTION
def default_text_transform(texts):
# Expect a list of texts
tokenized_texts = []
start_time = time()
for text in texts:
tokenized = text_tokenizer(text, padding="max_length",
max_length=max_text_length, truncation=True, return_tensors='pt')
tokenized_texts.append(torch.LongTensor(tokenized['input_ids']))
tokenized_texts = torch.cat(tokenized_texts, dim=0)
return tokenized_texts
class CsvDataset(BaseCsvDataset):
def __init__(self, input_filename, args):
super().__init__(input_filename, args)
self.image_transform = FLAVAImageTransform(is_train=False)
self.text_transform = default_text_transform
def __getitem__(self, idx):
raw_image = self.get_image_by_id(self.images[idx])
if self.crop:
raw_image = TF.crop(raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx])
image = self.image_transform(raw_image)
if self.one2many:
texts = self.text_transform([str(self.captions[idx])] + list(self.hard_negs[idx]))
else:
texts = self.text_transform([str(self.captions[idx])])[0]
return image, texts
def get_data(args, retrieval_data_path):
# Get CSVDataset
input_filename = retrieval_data_path
dataset = CsvDataset(
input_filename,
args)
num_samples = len(dataset)
sampler = None
shuffle=False
dataloader = DataLoader(
dataset,
batch_size=8,
shuffle=shuffle,
num_workers=1,
pin_memory=True,
sampler=sampler,
drop_last=False,
collate_fn=collator
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader)
### EVALUATION
def evaluate(model, data, complexity, negative_type):
metrics = {}
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dataloader = data.dataloader
# num_samples = 0
# samples_per_val = dataloader.num_samples
# cumulative_loss = 0.0
# all_image_features, all_text_features = [], []
one2many = dataloader.dataset.one2many
    assert one2many, "Not one2many?"
if one2many:
all_ranks = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
images, texts = batch
images = images.to(device=device, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
if one2many:
_, image_emb = model.encode_image(images, projection=True)
image_emb = nn.functional.normalize(image_emb, dim=-1)
_, text_emb = model.encode_text(texts, projection=True)
text_emb = nn.functional.normalize(text_emb)
set_size = text_emb.shape[0] // image_emb.shape[0]
for j in range(image_emb.shape[0]):
curr_image_emb = image_emb[j:j+1, :]
curr_text_emb = text_emb[j*set_size:(j+1)*set_size, :]
rank = get_one2many_rank(curr_image_emb, curr_text_emb)
all_ranks.append(rank)
# print(f'Processed example {i*8}')
metrics = get_one2many_metrics(np.array(all_ranks))
# Alter output here
logging.info(
"\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
)
return metrics
def main():
args = setup_args()
if args.output_dir:
output_dir = os.path.join(args.output_dir, 'flava')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Load the model
flava = flava_model(pretrained=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logger.info(f"Using device: {device}")
flava = flava.to(device)
flava.eval()
for hard_neg_type in args.hard_neg_types:
all_metrics = {}
# Iterate over each complexity
for i in range(4, 13):
print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
start_time = time()
retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
data = get_data(args, retrieval_data_path)
metrics = evaluate(flava, data, i, hard_neg_type)
print(f'Complexity {i} took {time() - start_time} seconds')
all_metrics[i] = metrics
if args.output_dir:
output = os.path.join(output_dir, f'productivity_flava_{hard_neg_type}_metrics.json')
print("saving results to:", output)
with open(output, 'w') as f:
json.dump(all_metrics, f)
if __name__ == "__main__":
main()
| 6,056 | 31.918478 | 135 |
py
|
CREPE
|
CREPE-master/crepe_prod_eval_clip.py
|
import logging
import os
from time import time
import json
import torch
import torchvision.transforms.functional as TF
import clip
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from crepe_eval_utils import BaseCsvDataset, get_one2many_rank, get_one2many_metrics, DataInfo
from crepe_params import setup_args
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
def collator(batch):
images = torch.stack([x[0] for x in batch], dim=0)
texts = torch.cat([x[1] for x in batch], dim=0)
return images, texts
### DATASET CONSTRUCTION
class CsvDataset(BaseCsvDataset):
def __init__(self, input_filename, args, processor, device):
super().__init__(input_filename, args)
self.processor = processor
self.device = device
def __getitem__(self, idx):
raw_image = self.get_image_by_id(self.images[idx])
if self.crop:
raw_image = TF.crop(raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx])
image = self.processor(raw_image)
texts = self.process_text([str(self.captions[idx])] + list(self.hard_negs[idx]))
return image, texts
def process_text(self, texts):
proc_text = [clip.tokenize(text, truncate=True) for text in texts]
return torch.cat(proc_text)
def get_data(args, retrieval_data_path, processor, device):
# Get CSVDataset
input_filename = retrieval_data_path
dataset = CsvDataset(
input_filename,
args,
processor,
device)
num_samples = len(dataset)
sampler = None
shuffle=False
dataloader = DataLoader(
dataset,
batch_size=16,
shuffle=shuffle,
num_workers=1,
pin_memory=True,
sampler=sampler,
drop_last=False,
collate_fn=collator
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader)
### EVALUATION
def evaluate(model, data, complexity, negative_type, device):
metrics = {}
dataloader = data.dataloader
# num_samples = 0
# samples_per_val = dataloader.num_samples
# cumulative_loss = 0.0
# all_image_features, all_text_features = [], []
one2many = dataloader.dataset.one2many
if one2many:
all_ranks = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
images, texts = batch
images = images.to(device)
texts = texts.to(device)
if one2many:
image_emb = model.encode_image(images)
image_emb /= image_emb.norm(dim = -1, keepdim = True)
text_emb = model.encode_text(texts)
text_emb /= text_emb.norm(dim = -1, keepdim = True)
set_size = text_emb.shape[0] // image_emb.shape[0]
for j in range(image_emb.shape[0]):
curr_image_emb = image_emb[j:j+1, :]
curr_text_emb = text_emb[j*set_size:(j+1)*set_size, :]
rank = get_one2many_rank(curr_image_emb, curr_text_emb)
all_ranks.append(rank)
print(f'Processed example {i*16}')
metrics = get_one2many_metrics(np.array(all_ranks))
# Alter output here
logging.info(
"\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
)
return metrics
def main():
args = setup_args()
if args.output_dir:
output_dir = os.path.join(args.output_dir, 'open_ai_clip')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Load the model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model, preprocess = clip.load(name = args.model_name, device=device)
model = model.to(device)
model.eval()
for hard_neg_type in args.hard_neg_types:
all_metrics = {}
# Iterate over each complexity
for i in range(4, 13):
print('\n' + '*' * 45 + f' Evaluating on complexity {i} ' + '*' * 45 + '\n')
start_time = time()
retrieval_data_path = os.path.join(args.input_dir, f'{hard_neg_type}/prod_vg_hard_negs_{hard_neg_type}_complexity_{i}.csv')
            if args.model_name == "RN50" or args.model_name == "RN101":
                model_save_name = args.model_name
            elif args.model_name == "ViT-B/32":
                model_save_name = 'vit_b32'
            elif args.model_name == "ViT-B/16":
                model_save_name = 'vit_b16'
            elif args.model_name == "ViT-L/14":
                model_save_name = 'vit_l14'
            else:
                # fallback: filesystem-safe name, so the output path below never hits a NameError
                model_save_name = args.model_name.replace('/', '_')
data = get_data(args, retrieval_data_path, preprocess, device)
metrics = evaluate(model, data, i, hard_neg_type, device)
print(f'Complexity {i} took {time() - start_time} seconds')
all_metrics[i] = metrics
if args.output_dir:
output = os.path.join(output_dir, f'productivity_clip_{model_save_name}_{hard_neg_type}_metrics.json')
print("saving results to:", output)
with open(output, 'w') as f:
json.dump(all_metrics, f)
if __name__ == '__main__':
main()
| 5,220 | 30.642424 | 135 |
py
|
CREPE
|
CREPE-master/crepe_compo_eval_open_clip.py
|
import os
import json
import logging
import torch
import numpy as np
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from dataclasses import dataclass
from open_clip import tokenize, create_model_and_transforms
from crepe_eval_utils import BaseCsvDataset, get_one2many_metrics, get_one2many_rank, get_metrics
from crepe_params import setup_args
DATA2MODEL = {
'cc12m': {
'RN50-quickgelu': 'rn50-quickgelu-cc12m-f000538c.pt'
},
'yfcc': {
'RN50-quickgelu': 'rn50-quickgelu-yfcc15m-455df137.pt',
'RN101-quickgelu': 'rn101-quickgelu-yfcc15m-3e04b30e.pt'
},
'laion': {
'ViT-B-16':'vit_b_16-laion400m_e32-55e67d44.pt',
'ViT-B-16-plus-240': 'vit_b_16_plus_240-laion400m_e32-699c4b84.pt',
'ViT-B-32-quickgelu': 'vit_b_32-quickgelu-laion400m_e32-46683a32.pt',
'ViT-L-14': 'vit_l_14-laion400m_e32-3d133497.pt',
}
}
COMPO_SPLITS = ['seen_compounds', 'unseen_compounds']
COMPLEXITIES = list(range(4, 13))
@dataclass
class DataInfo:
dataloader: DataLoader
sampler: DistributedSampler
class CsvDataset(BaseCsvDataset):
def __init__(self, input_filename, args, transforms):
super().__init__(input_filename, args, transforms=transforms)
def __getitem__(self, idx):
raw_image = self.get_image_by_id(self.images[idx])
if self.crop:
raw_image = TF.crop(raw_image, self.ys[idx], self.xs[idx], self.heights[idx], self.widths[idx])
image = self.transforms(raw_image)
if self.one2many:
texts = tokenize([str(self.captions[idx])] + list(self.hard_negs[idx]))
else:
texts = tokenize([str(self.captions[idx])])[0]
return image, texts
def get_csv_dataset(args, preprocess_fn, is_train):
input_filename = args.val_data
assert input_filename
dataset = CsvDataset(
input_filename,
args,
preprocess_fn)
num_samples = len(dataset)
sampler = None
shuffle = is_train and sampler is None
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=1,
pin_memory=True,
sampler=sampler,
drop_last=is_train,
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader, sampler)
def get_data(args, preprocess_fns):
preprocess_train, preprocess_val = preprocess_fns
data = {}
data["val"] = get_csv_dataset(
args, preprocess_val, is_train=False)
return data
def evaluate(model, data, args):
metrics = {}
device = torch.device(args.device)
model.eval()
autocast = torch.cuda.amp.autocast
dataloader = data['val'].dataloader
# FIXME this does not scale past small eval datasets
# all_image_features @ all_text_features will blow up memory and compute very quickly
all_image_features, all_text_features = [], []
one2many = dataloader.dataset.one2many
if one2many:
all_ranks = []
with torch.no_grad():
for i, batch in enumerate(dataloader):
images, texts = batch
images = images.to(device=device, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
if one2many:
image_features = model.encode_image(images)
image_features = F.normalize(image_features, dim=-1)
texts = torch.squeeze(texts, dim=0)
text_features = model.encode_text(texts)
text_features = F.normalize(text_features, dim=-1)
rank = get_one2many_rank(image_features, text_features)
all_ranks.append(rank)
else:
with autocast():
image_features, text_features, logit_scale = model(images, texts)
# features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly
# however, system RAM is easily exceeded and compute time becomes problematic
all_image_features.append(image_features.cpu())
all_text_features.append(text_features.cpu())
if one2many:
val_metrics = get_one2many_metrics(np.array(all_ranks))
        metrics.update(val_metrics)
else:
val_metrics = get_metrics(
image_features=torch.cat(all_image_features),
text_features=torch.cat(all_text_features)
)
        metrics.update(val_metrics)
    logging.info("\t".join([f"{k}: {v:.4f}" for k, v in metrics.items()]))
return metrics
def gather_params(args, hard_neg_type, split):
if args.compo_type == 'systematicity':
if hard_neg_type in ['atom', 'comp', 'combined']:
hard_neg_key = f'valid_hard_negs_{hard_neg_type}'
else:
raise NotImplementedError
retrieval_data_path = os.path.join(args.input_dir, f'syst_vg_hard_negs_{split}_in_{args.train_dataset}.csv')
elif args.compo_type == 'productivity':
hard_neg_key = 'hard_negs'
if hard_neg_type in ['atom', 'negate', 'swap']:
input_dir = os.path.join(args.input_dir, hard_neg_type)
retrieval_data_path = os.path.join(input_dir, f'prod_vg_hard_negs_{hard_neg_type}_complexity_{split}.csv')
else:
raise NotImplementedError
else:
raise NotImplementedError
args.val_data = retrieval_data_path
args.one2many = True
args.crop = True
args.hard_neg_key = hard_neg_key
args.batch_size = 1
return args
def main():
args = setup_args()
models = DATA2MODEL[args.train_dataset].keys()
if args.compo_type == 'systematicity':
splits = COMPO_SPLITS
elif args.compo_type == 'productivity':
splits = COMPLEXITIES
if args.output_dir:
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
if torch.cuda.is_available():
device = 'cuda:0'
torch.cuda.set_device(device)
else:
device = 'cpu'
args.device = device
device = torch.device(device)
for model_name in models:
pretrained = os.path.join(args.model_dir, DATA2MODEL[args.train_dataset][model_name])
model, preprocess_train, preprocess_val = create_model_and_transforms(
model_name,
pretrained,
precision='amp',
device=device
)
for hard_neg_type in args.hard_neg_types:
all_metrics = {}
for split in splits:
# params = gather_params(args, model, split)
print('\n' + '*' * 45 + f' Evaluating {model_name} {args.compo_type} on HN-{hard_neg_type.upper()} test set split {split} ' + '*' * 45 + '\n')
args = gather_params(args, hard_neg_type, split)
# initialize datasets
data = get_data(args, (preprocess_train, preprocess_val))
assert len(data), 'At least one dataset must be specified.'
metrics = evaluate(model, data, args)
all_metrics[split] = metrics
if args.output_dir:
output = os.path.join(args.output_dir, f'{args.compo_type}_{args.train_dataset}_{model_name}_{hard_neg_type}_metrics.json')
print("saving results to:", output)
with open(output, 'w') as f:
json.dump(all_metrics, f)
if __name__ == "__main__":
main()
| 7,732 | 34.15 | 160 |
py
|
CREPE
|
CREPE-master/crepe_params.py
|
import argparse
def setup_args():
parser = argparse.ArgumentParser(description="Run image2text retrieval eval.")
parser.add_argument("--compo-type", required=True, type=str, default="systematicity", help="Either systematicity or productivity")
parser.add_argument("--input-dir", required=True, type=str, default="/vision/group/CLIPComp/crepe/prod_hard_negatives")
parser.add_argument('--hard-neg-types', required=True, type=str, nargs='+', help="The type(s) of hard negatives to include in the retrieval set.")
parser.add_argument("--model-dir", type=str, default="/vision/group/clip")
parser.add_argument("--output-dir", type=str, default="log/")
parser.add_argument("--csv-img-key", type=str, default="image_id")
parser.add_argument("--csv-caption-key", type=str, default="caption")
parser.add_argument("--hard-neg-key", type=str, default="hard_negs", help="The column name of the hard negative captions.")
parser.add_argument("--crop", type=bool, default=True, help="Whether to crop the image input.")
parser.add_argument("--one2many", type=bool, default=True, help="Whether each image query has a different retrieval text set.")
# For systematicity eval on open_clip's pretrained models with known training dataset
parser.add_argument("--train-dataset", type=str, default="cc12m")
# For CLIP & CyCLIP
parser.add_argument("--model-name", type=str, default="RN50")
# For CyCLIP
parser.add_argument("--pretrained", default=False, action="store_true", help="Use the OpenAI pretrained models")
args = parser.parse_args()
return args
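# Example invocations (illustrative only; paths are hypothetical and the valid
# hard-negative types depend on the chosen --compo-type):
#   python crepe_compo_eval_open_clip.py --compo-type productivity \
#       --input-dir /data/crepe/prod_hard_negatives --hard-neg-types atom swap \
#       --train-dataset cc12m --model-dir /data/clip --output-dir log/
#   python crepe_compo_eval_open_clip.py --compo-type systematicity \
#       --input-dir /data/crepe/syst_hard_negatives --hard-neg-types comp \
#       --train-dataset yfcc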
| 1,608 | 72.136364 | 150 |
py
|
CREPE
|
CREPE-master/crepe_eval_utils.py
|
import ast
import logging
import os
from PIL import Image
from dataclasses import dataclass
import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import pandas as pd
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
### DATASET CONSTRUCTION
class BaseCsvDataset(Dataset):
def __init__(self, input_filename, args, transforms=None):
logging.debug(f'Loading csv data from {input_filename}.')
df = pd.read_csv(input_filename)
# print(f"Total number of examples: {len(df)}.")
self.crop = args.crop
if self.crop:
assert 'x' in df.columns and 'y' in df.columns and 'width' in df.columns and 'height' in df.columns, "missing x, y, width, or height."
self.xs = df['x'].tolist()
self.ys = df['y'].tolist()
self.heights = df['height'].tolist()
self.widths = df['width'].tolist()
# print("cropping:", self.crop)
self.one2many = args.one2many
# print("one2many:", self.one2many)
if self.one2many:
self.hard_negs = [ast.literal_eval(ls_str) for ls_str in df[args.hard_neg_key]]
self.images = df[args.csv_img_key].tolist()
self.captions = df[args.csv_caption_key].tolist()
self.transforms = transforms
def __len__(self):
return len(self.captions)
def get_image_by_id(self, image_id):
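        # NOTE (added): these are machine-specific dataset locations; point
        # them at your local Visual Genome image folders.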
vg_image_paths = ['/nlp/scr/irena/data/visual_genome/img/VG_100K', '/nlp/scr/irena/data/visual_genome/img/VG_100K_2']
for p in vg_image_paths:
path = os.path.join(p, f"{image_id}.jpg")
if os.path.exists(path):
return Image.open(path).convert("RGB")
raise FileNotFoundError(f'The image with id {image_id} is not found.')
def __getitem__(self, idx):
print("Not yet implemented.")
assert(False)
@dataclass
class DataInfo:
dataloader: DataLoader
# EVALUATION UTILITIES
def get_one2many_rank(image_features, text_features):
logits_per_image = (image_features @ text_features.t()).detach().cpu()
    ground_truth = 0  # the ground-truth caption is placed first; see CsvDataset.__getitem__() in data.py
ranking = torch.argsort(logits_per_image, descending=True)
pred = torch.where(ranking == ground_truth)[1].detach().cpu().numpy()
return pred
def get_one2many_metrics(preds, name='image_to_text'):
metrics = {}
metrics[f"{name}_mean_rank"] = preds.mean() + 1
metrics[f"{name}_rank_std"] = preds.std()
metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
for k in [1, 3, 5, 10]:
metrics[f"{name}_R@{k}"] = np.mean(preds < k)
metrics[f"{name}_R@{k}_std"] = np.std(preds < k)
return metrics
def get_metrics(image_features, text_features):
metrics = {}
logits_per_image = (image_features @ text_features.t()).detach().cpu()
logits_per_text = logits_per_image.t().detach().cpu()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
ground_truth = torch.arange(len(text_features)).view(-1, 1)
for name, logit in logits.items():
ranking = torch.argsort(logit, descending=True)
preds = torch.where(ranking == ground_truth)[1]
preds = preds.detach().cpu().numpy()
metrics[f"{name}_mean_rank"] = preds.mean() + 1
metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
for k in [1, 3, 5, 10]:
metrics[f"{name}_R@{k}"] = np.mean(preds < k)
return metrics
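if __name__ == "__main__":
    # Minimal sanity-check sketch (an illustrative addition, not part of the
    # original module). One image query against a 3-caption set whose
    # ground-truth caption sits at index 0 should come back with rank 0,
    # i.e. R@1 == 1.0.
    img = torch.tensor([[1.0, 0.0]])
    caps = torch.tensor([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0]])
    print(get_one2many_metrics(get_one2many_rank(img, caps)))
    # Symmetric image<->text retrieval with perfectly aligned features:
    feats = torch.eye(4)
    print(get_metrics(image_features=feats, text_features=feats))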
| 3,542 | 35.525773 | 146 |
py
|
CREPE
|
CREPE-master/open_clip/openai.py
|
""" OpenAI pretrained model functions
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import os
import warnings
from typing import Union, List
import torch
from .model import build_model_from_openai_state_dict
from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained
__all__ = ["list_openai_models", "load_openai_model"]
def list_openai_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list_pretrained_tag_models('openai')
def load_openai_model(
name: str,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit=True,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if get_pretrained_url(name, 'openai'):
model_path = download_pretrained(get_pretrained_url(name, 'openai'))
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
try:
model = build_model_from_openai_state_dict(state_dict or model.state_dict()).to(device)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(sd).to(device)
if str(device) == "cpu":
model.float()
return model
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
# ensure image_size attr available at consistent location for both jit and non-jit
model.visual.image_size = model.input_resolution.item()
return model
| 4,503 | 34.464567 | 117 |
py
|
CREPE
|
CREPE-master/open_clip/transform.py
|
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, ToTensor, Resize, \
CenterCrop
from PIL import Image
def _convert_to_rgb(image):
return image.convert('RGB')
def image_transform(
image_size: int,
is_train: bool,
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)
):
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose([
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=Image.BICUBIC),
_convert_to_rgb,
ToTensor(),
normalize,
])
else:
return Compose([
Resize(image_size, interpolation=Image.BICUBIC),
CenterCrop(image_size),
_convert_to_rgb,
ToTensor(),
normalize,
])
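if __name__ == "__main__":
    # Illustrative check (added): the eval-time transform resizes, center-crops
    # and normalizes a PIL image into a CHW float tensor.
    dummy = Image.new("RGB", (640, 480))
    preprocess = image_transform(image_size=224, is_train=False)
    print(preprocess(dummy).shape)  # torch.Size([3, 224, 224])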
| 850 | 26.451613 | 93 |
py
|
CREPE
|
CREPE-master/open_clip/loss.py
|
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def gather_features(
image_features,
text_features,
local_loss=False,
gather_with_grad=False,
rank=0,
world_size=1,
use_horovod=False
):
if use_horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
else:
# We gather tensors from all gpus
if gather_with_grad:
all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
else:
gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
return all_image_features, all_text_features
class ClipLoss(nn.Module):
def __init__(
self,
local_loss=False,
gather_with_grad=False,
cache_labels=False,
rank=0,
world_size=1,
use_horovod=False,
):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
self.cache_labels = cache_labels
self.rank = rank
self.world_size = world_size
self.use_horovod = use_horovod
# cache state
self.prev_num_logits = 0
self.labels = {}
def forward(self, image_features, text_features, logit_scale):
device = image_features.device
if self.world_size > 1:
all_image_features, all_text_features = gather_features(
image_features, text_features,
self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)
if self.local_loss:
logits_per_image = logit_scale * image_features @ all_text_features.T
logits_per_text = logit_scale * text_features @ all_image_features.T
else:
logits_per_image = logit_scale * all_image_features @ all_text_features.T
logits_per_text = logits_per_image.T
else:
logits_per_image = logit_scale * image_features @ text_features.T
logits_per_text = logit_scale * text_features @ image_features.T
# calculated ground-truth and cache if enabled
num_logits = logits_per_image.shape[0]
if self.prev_num_logits != num_logits or device not in self.labels:
labels = torch.arange(num_logits, device=device, dtype=torch.long)
if self.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.rank
if self.cache_labels:
self.labels[device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[device]
total_loss = (
F.cross_entropy(logits_per_image, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2
return total_loss
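if __name__ == "__main__":
    # Single-process sanity sketch (an illustrative addition): with random
    # normalized features the symmetric contrastive loss should land near
    # log(batch_size).
    torch.manual_seed(0)
    img = F.normalize(torch.randn(8, 512), dim=-1)
    txt = F.normalize(torch.randn(8, 512), dim=-1)
    print(ClipLoss()(img, txt, logit_scale=torch.tensor(1.0)))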
| 4,658 | 39.513043 | 101 |
py
|
CREPE
|
CREPE-master/open_clip/utils.py
|
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
def freeze_batch_norm_2d(module, module_match={}, name=''):
"""
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
returned. Otherwise, the module is walked recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
module_match (dict): Dictionary of full module names to freeze (all if empty)
name (str): Full module name (prefix)
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
is_match = True
if module_match:
is_match = name in module_match
if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
res = FrozenBatchNorm2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for child_name, child in module.named_children():
full_child_name = '.'.join([name, child_name]) if name else child_name
new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
if new_child is not child:
res.add_module(child_name, new_child)
return res
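if __name__ == "__main__":
    # Illustrative check (added): BatchNorm2d submodules are replaced in place
    # with FrozenBatchNorm2d, preserving weights and running statistics.
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
    net = freeze_batch_norm_2d(net)
    print(net[1])  # -> FrozenBatchNorm2d(8, eps=1e-05)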
| 1,850 | 44.146341 | 131 |
py
|
CREPE
|
CREPE-master/open_clip/model.py
|
""" CLIP Model
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
from collections import OrderedDict
from dataclasses import dataclass
from typing import Tuple, Union, Callable, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint
from .timm_model import TimmModel
from .utils import freeze_batch_norm_2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.relu1(self.bn1(self.conv1(x)))
out = self.relu2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.relu3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
# FIXME support for non-transformer
pass
def stem(self, x):
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
return x.to(orig_type)
class QuickGELU(nn.Module):
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
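    # x * sigmoid(1.702 * x) is the sigmoid-based approximation of GELU that the
    # released OpenAI CLIP weights were trained with, so it is kept here for
    # checkpoint compatibility.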
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
mlp_width = int(d_model * mlp_ratio)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, mlp_width)),
("gelu", act_layer()),
("c_proj", nn.Linear(mlp_width, d_model))
]))
self.ln_2 = LayerNorm(d_model)
def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU):
super().__init__()
self.width = width
self.layers = layers
self.grad_checkpointing = False
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(width, heads, mlp_ratio, act_layer=act_layer)
for _ in range(layers)
])
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
for r in self.resblocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(r, x, attn_mask)
else:
x = r(x, attn_mask=attn_mask)
return x
class VisualTransformer(nn.Module):
def __init__(
self, image_size: int, patch_size: int, width: int, layers: int, heads: int, mlp_ratio: float,
output_dim: int, act_layer: Callable = nn.GELU):
super().__init__()
self.image_size = image_size
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn((image_size // patch_size) ** 2 + 1, width))
self.ln_pre = LayerNorm(width)
self.transformer = Transformer(width, layers, heads, mlp_ratio, act_layer=act_layer)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.transformer.grad_checkpointing = enable
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat(
[self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
@dataclass
class CLIPVisionCfg:
layers: Union[Tuple[int, int, int, int], int] = 12
width: int = 768
head_width: int = 64
mlp_ratio: float = 4.0
patch_size: int = 16
image_size: Union[Tuple[int, int], int] = 224
timm_model_name: str = None # a valid model name overrides layers, width, patch_size
timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
@dataclass
class CLIPTextCfg:
context_length: int = 77
vocab_size: int = 49408
width: int = 512
heads: int = 8
layers: int = 12
class CLIP(nn.Module):
def __init__(
self,
embed_dim: int,
vision_cfg: CLIPVisionCfg,
text_cfg: CLIPTextCfg,
quick_gelu: bool = False,
):
super().__init__()
if isinstance(vision_cfg, dict):
vision_cfg = CLIPVisionCfg(**vision_cfg)
if isinstance(text_cfg, dict):
text_cfg = CLIPTextCfg(**text_cfg)
self.context_length = text_cfg.context_length
# OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
# memory efficient in recent PyTorch releases (>= 1.10).
# NOTE: timm models always use native GELU regardless of quick_gelu flag.
act_layer = QuickGELU if quick_gelu else nn.GELU
if vision_cfg.timm_model_name:
self.visual = TimmModel(
vision_cfg.timm_model_name,
pretrained=vision_cfg.timm_model_pretrained,
pool=vision_cfg.timm_pool,
proj=vision_cfg.timm_proj,
embed_dim=embed_dim,
image_size=vision_cfg.image_size
)
act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models
elif isinstance(vision_cfg.layers, (tuple, list)):
vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
self.visual = ModifiedResNet(
layers=vision_cfg.layers,
output_dim=embed_dim,
heads=vision_heads,
image_size=vision_cfg.image_size,
width=vision_cfg.width
)
else:
vision_heads = vision_cfg.width // vision_cfg.head_width
self.visual = VisualTransformer(
image_size=vision_cfg.image_size,
patch_size=vision_cfg.patch_size,
width=vision_cfg.width,
layers=vision_cfg.layers,
heads=vision_heads,
mlp_ratio=vision_cfg.mlp_ratio,
output_dim=embed_dim,
act_layer=act_layer,
)
self.transformer = Transformer(
width=text_cfg.width,
layers=text_cfg.layers,
heads=text_cfg.heads,
act_layer=act_layer,
)
self.vocab_size = text_cfg.vocab_size
self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, text_cfg.width))
self.ln_final = LayerNorm(text_cfg.width)
self.text_projection = nn.Parameter(torch.empty(text_cfg.width, embed_dim))
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
self.init_parameters()
def init_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
nn.init.constant_(self.logit_scale, np.log(1 / 0.07))
if hasattr(self.visual, 'init_parameters'):
self.visual.init_parameters()
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the diagonal and everything below it
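        # Illustratively, for context_length == 3 the mask is
        #   [[0., -inf, -inf],
        #    [0.,   0., -inf],
        #    [0.,   0.,   0.]]
        # so token i can only attend to tokens j <= i.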
return mask
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.visual.set_grad_checkpointing(enable)
self.transformer.grad_checkpointing = enable
def encode_image(self, image):
return self.visual(image)
def encode_text(self, text):
# print('text before embedding:', text)
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
# print('text after embedding:', x)
x = x + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, attn_mask=self.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return x
def forward(self, image, text):
if image is None:
return self.encode_text(text)
elif text is None:
return self.encode_image(image)
image_features = self.encode_image(image)
image_features = F.normalize(image_features, dim=-1)
text_features = self.encode_text(text)
text_features = F.normalize(text_features, dim=-1)
return image_features, text_features, self.logit_scale.exp()
def convert_weights_to_fp16(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
def build_model_from_openai_state_dict(state_dict: dict):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_size = vision_patch_size * grid_size
else:
counts: list = [
len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_size = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
vision_cfg = CLIPVisionCfg(
layers=vision_layers,
width=vision_width,
patch_size=vision_patch_size,
image_size=image_size,
)
text_cfg = CLIPTextCfg(
context_length=context_length,
vocab_size=vocab_size,
width=transformer_width,
heads=transformer_heads,
layers=transformer_layers
)
model = CLIP(
embed_dim,
vision_cfg=vision_cfg,
text_cfg=text_cfg,
quick_gelu=True, # OpenAI models were trained with QuickGELU
)
for key in ["input_resolution", "context_length", "vocab_size"]:
state_dict.pop(key, None)
convert_weights_to_fp16(model)
model.load_state_dict(state_dict)
return model.eval()
def trace_model(model, batch_size=256, device=torch.device('cpu')):
model.eval()
image_size = model.visual.image_size
example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
model = torch.jit.trace_module(
model,
inputs=dict(
forward=(example_images, example_text),
encode_text=(example_text,),
encode_image=(example_images,)
))
model.visual.image_size = image_size
return model
| 21,811 | 37.95 | 120 |
py
|
CREPE
|
CREPE-master/open_clip/version.py
|
__version__ = '1.3.0'
| 22 | 10.5 | 21 |
py
|
CREPE
|
CREPE-master/open_clip/factory.py
|
import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
import torch
from .model import CLIP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # dictionary of model architecture configs, keyed by model name
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def create_model(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
):
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
if pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(model_name, device=device, jit=jit)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if model_name in _MODEL_CONFIGS:
logging.info(f'Loading {model_name} model config.')
model_cfg = deepcopy(_MODEL_CONFIGS[model_name])
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
model = CLIP(**model_cfg)
if pretrained:
checkpoint_path = ''
url = get_pretrained_url(model_name, pretrained)
if url:
checkpoint_path = download_pretrained(url)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
model.load_state_dict(load_state_dict(checkpoint_path))
else:
logging.warning(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
raise RuntimeError(f'Pretrained weights ({pretrained}) not found for model {model_name}.')
model.to(device=device)
if precision == "fp16":
assert device.type != 'cpu'
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model
def create_model_and_transforms(
model_name: str,
pretrained: str = '',
precision: str = 'fp32',
device: torch.device = torch.device('cpu'),
jit: bool = False,
force_quick_gelu: bool = False,
pretrained_image: bool = False,
):
model = create_model(
model_name, pretrained, precision, device, jit,
force_quick_gelu=force_quick_gelu,
pretrained_image=pretrained_image)
preprocess_train = image_transform(model.visual.image_size, is_train=True)
preprocess_val = image_transform(model.visual.image_size, is_train=False)
return model, preprocess_train, preprocess_val
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
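# Example usage (illustrative; the model/tag pair must exist in the registries):
#   model, preprocess_train, preprocess_val = create_model_and_transforms(
#       'RN50-quickgelu', pretrained='cc12m', precision='amp',
#       device=torch.device('cuda'))
#   print(list_models())  # architectures discovered from model_configs/*.json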
| 5,455 | 34.660131 | 106 |
py
|
CREPE
|
CREPE-master/open_clip/tokenizer.py
|
""" CLIP tokenizer
Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import gzip
import html
import os
from functools import lru_cache
from typing import Union, List
import ftfy
import regex as re
import torch
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
    Returns a dict mapping utf-8 bytes to corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings
    that avoid mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
# print("merges len:", len(merges))
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
if not special_tokens:
special_tokens = ['<start_of_text>', '<end_of_text>']
else:
special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
vocab.extend(special_tokens)
# print("vocab:", len(vocab))
self.encoder = dict(zip(vocab, range(len(vocab))))
# print("encoder:", self.encoder)
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {t:t for t in special_tokens}
special = "|".join(special_tokens)
self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
self.vocab_size = len(self.encoder)
self.all_special_ids = [self.encoder[t] for t in special_tokens]
def bpe(self, token):
if token in self.cache:
return self.cache[token]
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # no further occurrence of `first` in the word
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
# print("token, bpe:", token, self.bpe(token))
# print(self.bpe(token).split(' '))
# for bpe_token in self.bpe(token).split(' '):
# print('token:', bpe_token )
# print('bpe:', self.encoder[bpe_token])
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
# print("overall bpe:", bpe_tokens)
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
_tokenizer = SimpleTokenizer()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<start_of_text>"]
eot_token = _tokenizer.encoder["<end_of_text>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
tokens = tokens[:context_length] # Truncate
result[i, :len(tokens)] = torch.tensor(tokens)
return result
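if __name__ == "__main__":
    # Illustrative check (added): tokenize pads/truncates every caption to the
    # context length and brackets it with the <start_of_text>/<end_of_text> ids.
    toks = tokenize(["a photo of a cat", "a dog"])
    print(toks.shape)  # torch.Size([2, 77])
    print(toks[0, 0].item() == _tokenizer.encoder["<start_of_text>"])  # True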
| 6,637 | 33.936842 | 121 |
py
|
CREPE
|
CREPE-master/open_clip/pretrained.py
|
import hashlib
import os
import urllib
import warnings
from tqdm import tqdm
_RN50 = dict(
openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"
)
_RN50_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"
)
_RN101 = dict(
openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"
)
_RN101_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"
)
_RN50x4 = dict(
openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
)
_RN50x16 = dict(
openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
)
_RN50x64 = dict(
openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
)
_VITB32 = dict(
openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
laion2b_e16="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth",
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
)
_VITB32_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
)
_VITB16 = dict(
openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt",
)
_VITB16_PLUS_240 = dict(
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt",
)
_VITL14 = dict(
openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
laion400m_e31='https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt',
laion400m_e32='https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt',
)
_VITL14_336 = dict(
openai="https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"
)
_PRETRAINED = {
"RN50": _RN50,
"RN50-quickgelu": _RN50_quickgelu,
"RN101": _RN101,
"RN101-quickgelu": _RN101_quickgelu,
"RN50x4": _RN50x4,
"RN50x16": _RN50x16,
"RN50x64": _RN50x64,
"ViT-B-32": _VITB32,
"ViT-B-32-quickgelu": _VITB32_quickgelu,
"ViT-B-16": _VITB16,
"ViT-B-16-plus-240": _VITB16_PLUS_240,
"ViT-L-14": _VITL14,
"ViT-L-14-336": _VITL14_336,
}
def list_pretrained(as_str: bool = False):
""" returns list of pretrained models
Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
"""
return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]
def list_pretrained_tag_models(tag: str):
""" return all models having the specified pretrain tag """
models = []
for k in _PRETRAINED.keys():
if tag in _PRETRAINED[k]:
models.append(k)
return models
def list_pretrained_model_tags(model: str):
""" return all pretrain tags for the specified model architecture """
tags = []
if model in _PRETRAINED:
tags.extend(_PRETRAINED[model].keys())
return tags
def get_pretrained_url(model: str, tag: str):
if model not in _PRETRAINED:
return ''
model_pretrained = _PRETRAINED[model]
tag = tag.lower()
if tag not in model_pretrained:
return ''
return model_pretrained[tag]
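# Example (illustrative):
#   get_pretrained_url('RN50', 'cc12m')       -> the open_clip release URL above
#   get_pretrained_url('RN50', 'no-such-tag') -> ''
#   list_pretrained_tag_models('openai')      -> every architecture with OpenAI weights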
def download_pretrained(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
if 'openaipublic' in url:
expected_sha256 = url.split("/")[-2]
else:
expected_sha256 = ''
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if expected_sha256:
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
else:
return download_target
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if expected_sha256 and hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
| 7,174 | 42.75 | 142 |
py
|
CREPE
|
CREPE-master/open_clip/__init__.py
|
from .factory import list_models, create_model, create_model_and_transforms, add_model_config
from .loss import ClipLoss
from .model import CLIP, CLIPTextCfg, CLIPVisionCfg, convert_weights_to_fp16, trace_model
from .openai import load_openai_model, list_openai_models
from .pretrained import list_pretrained, list_pretrained_tag_models, list_pretrained_model_tags,\
get_pretrained_url, download_pretrained
from .tokenizer import SimpleTokenizer, tokenize
from .transform import image_transform
| 499 | 54.555556 | 97 |
py
|
CREPE
|
CREPE-master/open_clip/timm_model.py
|
""" timm model adapter
Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
"""
from collections import OrderedDict
import torch.nn as nn
try:
import timm
from timm.models.layers import Mlp, to_2tuple
from timm.models.layers.attention_pool2d import RotAttentionPool2d
from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
except ImportError:
timm = None
from .utils import freeze_batch_norm_2d
class TimmModel(nn.Module):
""" timm model adapter
# FIXME this adapter is a work in progress, may change in ways that break weight compat
"""
def __init__(
self,
model_name,
embed_dim,
image_size=224,
pool='avg',
proj='linear',
drop=0.,
pretrained=False):
super().__init__()
if timm is None:
raise RuntimeError("Please `pip install timm` to use timm models.")
self.image_size = to_2tuple(image_size)
self.trunk = timm.create_model(model_name, pretrained=pretrained)
feat_size = self.trunk.default_cfg.get('pool_size', None)
feature_ndim = 1 if not feat_size else 2
if pool in ('abs_attn', 'rot_attn'):
assert feature_ndim == 2
# if attn pooling used, remove both classifier and default pool
self.trunk.reset_classifier(0, global_pool='')
else:
# reset global pool if pool config set, otherwise leave as network default
reset_kwargs = dict(global_pool=pool) if pool else {}
self.trunk.reset_classifier(0, **reset_kwargs)
prev_chs = self.trunk.num_features
head_layers = OrderedDict()
if pool == 'abs_attn':
head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
prev_chs = embed_dim
elif pool == 'rot_attn':
head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
prev_chs = embed_dim
else:
assert proj, 'projection layer needed if non-attention pooling is used.'
# NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
if proj == 'linear':
head_layers['drop'] = nn.Dropout(drop)
head_layers['proj'] = nn.Linear(prev_chs, embed_dim)
elif proj == 'mlp':
head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop)
self.head = nn.Sequential(head_layers)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
""" lock modules
Args:
unlocked_groups (int): leave last n layer groups unlocked (default: 0)
"""
if not unlocked_groups:
# lock full model
for param in self.trunk.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self.trunk)
else:
# NOTE: partial freeze requires latest timm (master) branch and is subject to change
try:
# FIXME import here until API stable and in an official release
from timm.models.helpers import group_parameters, group_modules
except ImportError:
raise RuntimeError(
'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
matcher = self.trunk.group_matcher()
gparams = group_parameters(self.trunk, matcher)
max_layer_id = max(gparams.keys())
max_layer_id = max_layer_id - unlocked_groups
for group_idx in range(max_layer_id + 1):
group = gparams[group_idx]
for param in group:
self.trunk.get_parameter(param).requires_grad = False
if freeze_bn_stats:
gmodules = group_modules(self.trunk, matcher, reverse=True)
gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
freeze_batch_norm_2d(self.trunk, gmodules)
def forward(self, x):
x = self.trunk(x)
x = self.head(x)
return x
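# Hedged usage sketch (illustration only; requires `pip install timm` and torch):
# wrap a timm backbone as a CLIP-style vision tower projecting to a 512-d
# embedding. 'resnet50' is just an example model name.
#
#   tower = TimmModel('resnet50', embed_dim=512, image_size=224,
#                     pool='avg', proj='linear')
#   feats = tower(torch.randn(1, 3, 224, 224))  # -> tensor of shape [1, 512]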
| 4,300 | 39.196262 | 119 |
py
|
DNN_Rover
|
DNN_Rover-master/ModelTrain.py
|
# author = michael teti
from __future__ import division, print_function, absolute_import
import numpy as np
import tflearn
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.helpers.trainer import Trainer
import h5py
from tflearn.metrics import *
from tflearn.objectives import categorical_crossentropy
import glob
import matplotlib.pyplot as plt
import sys, os
import cv2
from NetworkSwitch import *
from sklearn.preprocessing import scale
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
required=True,
help='What to save the model as.')
parser.add_argument(
'--network_name',
type=str,
required=True,
help='The name of the neural network you want to train.')
parser.add_argument(
'--dropout_prob',
type=float,
required=False,
default=0.5,
help='What dropout probability to use if needed')
parser.add_argument(
'--training_iters',
type=int,
default=10000,
help='How many iterations of gradient descent to do')
parser.add_argument(
'--data_path',
type=str,
default=os.path.join(os.getcwd(), 'RoverData/Right2'),
help='The path to where the training data is located.')
args = parser.parse_args()
m_save = args.model_name + '_'
dropout_keep_prob = args.dropout_prob
training_iterations=args.training_iters
network_name = args.network_name
data_path = args.data_path
if 'Color' in m_save:
im_method = 0
num_stack = 1
channs = 3
elif '3frames5,15_GrayCropped' in m_save:
im_method = 1
num_stack = 3
channs = 3
elif '1frame_GrayCropped' in m_save:
im_method = 2
num_stack = 1
channs = 1
# start tensorboard
os.system('tensorboard --logdir=/tmp/tflearn_logs/ &')
# define useful variables
os.chdir(data_path)
fnames = glob.glob('*.h5') # datasets to train on
batch_sz = 150 # training batch size
val_name = 'Run_218seconds_Michael_Sheri.h5' # Dataset to use for validation
num_classes = 4
stack_nums = [5, 15]
learn_rate = 3e-5
def create_framestack(x, y, f_args):
f_args.sort()
X_ = []
Y_ = []
for ex_num in range(x.shape[0]-1, max(f_args), -1):
xf = x[ex_num, ...]
for i in range(len(f_args)):
xf = np.concatenate((xf,
x[ex_num-f_args[i], ...]),
axis=2)
X_.append(xf)
Y_.append(y[ex_num, :])
return np.asarray(X_), np.asarray(Y_)
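# Hedged shape check (illustration only): with stack offsets [5, 15], each
# returned sample concatenates frames t, t-5 and t-15 along the channel axis,
# and samples closer than max(offsets) to the start of the batch are dropped.
_fx, _fy = create_framestack(np.zeros([20, 8, 8, 1]), np.zeros([20, 4]), [5, 15])
assert _fx.shape == (4, 8, 8, 3) and _fy.shape == (4, 4)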
def random_crop(x, padlen=30):
h, w = x.shape[1], x.shape[2]
X = np.zeros(x.shape)
x = np.pad(x,
((0,0),
(padlen//2,padlen//2),
(padlen//2,padlen//2),
(0,0)), 'constant')
for i in range(x.shape[0]):
h_ind, w_ind = np.random.randint(0, padlen, 2)
X[i,...] = x[i, h_ind:h_ind+h, w_ind:w_ind+w, :]
return X
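# Hedged shape check (illustration only): zero-padding by `padlen` and cropping
# back yields a randomly shifted view of each frame at the original size.
assert random_crop(np.zeros([2, 16, 16, 3])).shape == (2, 16, 16, 3)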
def feature_scale(x):
b, h, w, c = x.shape
x = scale(x.reshape([b, -1]), 1)
return x.reshape([b, h, w, c])
def batch_get(filename, batch_size, channs, num_classes):
f = h5py.File(filename, 'r')
X = f['X']
Y = f['Y']
    x = np.zeros([batch_size, 130, 320, 3])  # frames load in color; grayscaled below when needed
y = np.zeros([batch_size, num_classes])
rand = np.random.randint(max(stack_nums), X.shape[0], batch_size)
count = 0
for r in rand:
x[count,...] = X[r, 110:, ...]
y[count, int(Y[r] + 1.0)] = 1.0
count += 1
    if im_method in [1, 2]:
        x = np.mean(x, 3, keepdims=True)  # grayscale the sampled batch frames
assert(X.shape[0] == Y.shape[0]), 'Data and labels different sizes'
f.flush()
f.close()
return x, y
# Validation set
print('Validation Dataset: %s'%(val_name))
# Create input layer and label placeholder for the network
labels = tf.placeholder(dtype=tf.float32, shape=[None, num_classes])
network = tf.placeholder(dtype=tf.float32, shape=[None, 130, 320, channs])
net_out = modelswitch[network_name](network, 4, dropout_keep_prob)
# send the input placeholder to the specified network
acc = tf.reduce_mean(tf.to_float(tf.equal(tf.argmax(net_out, 1), tf.argmax(labels, 1))))
cost = categorical_crossentropy(net_out, labels) # crossentropy loss function
# Tensorboard summaries
tf.summary.scalar('Accuracy_', acc)
tf.summary.scalar('Loss_', cost)
merged = tf.summary.merge_all()
# gradient descent optimizer
opt = tf.train.AdamOptimizer(learning_rate=learn_rate)
trainop = tflearn.TrainOp(loss=cost,
optimizer=opt,
metric=None,
batch_size=batch_sz)
model = Trainer(train_ops=trainop)
writer = tf.summary.FileWriter('/tmp/tflearn_logs/test' + m_save + network_name,
model.session.graph)
writer2 = tf.summary.FileWriter('/tmp/tflearn_logs/train' + m_save + network_name,
model.session.graph)
################################## Main Loop #######################################
for i in range(training_iterations):
# pick random dataset for this epoch
    n = np.random.randint(0, len(fnames), 1)  # any file; the validation set is skipped below
filename = fnames[n[0]]
# skip validation set if chosen
if filename == val_name:
continue
# load the chosen data file
X, Y = batch_get(filename, batch_sz, channs, num_classes)
# local feature Scaling
X = feature_scale(X)
# framestack
if num_stack != 1:
X, Y = create_framestack(X, Y, stack_nums)
# random crop for augmentation
X = random_crop(X)
# Training
model.fit_batch(feed_dicts={network:X, labels:Y})
train_acc, train_loss = model.session.run([acc, cost],
feed_dict={network:X, labels:Y})
train_summary = model.session.run(merged, feed_dict={network:X, labels:Y})
writer2.add_summary(train_summary, i)
if i%100 == 0:
# get validation batch
tx, ty = batch_get(val_name, 600, channs, num_classes)
# feature scale validation data
tx = feature_scale(tx)
# Create validation framestack
if num_stack != 1:
tx, ty = create_framestack(tx, ty, stack_nums)
assert(ty.shape[0] == tx.shape[0]),'data and label shapes do not match'
# Get validation accuracy and error rate
val_acc, val_loss, summary = model.session.run([acc, cost, merged],
feed_dict={network:tx, labels:ty})
writer.add_summary(summary, i)
# Save model and acc/error curves
model.save(m_save + modelswitch[network_name].__name__)
| 6,753 | 27.618644 | 88 |
py
|
DNN_Rover
|
DNN_Rover-master/main.py
|
from RoverAPI import *
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
help='path to the model file if autonomous.')
    parser.add_argument(
        '--autonomous',
        action='store_true',
        help='Pass this flag for autonomous control; omit it for human control.')
parser.add_argument(
'--network',
type=str,
help='Name of the network you want to run if autonomous. Ex. resnet34')
parser.add_argument(
'--driver',
type=str,
default='unknown_driver',
help='The name of the person operating or running the rover. Optional')
parser.add_argument(
'--rover',
type=str,
default='no_name',
help='The name on the rover being used. Optional')
parser.add_argument(
'--frames_per_second',
type=int,
default=30,
help='The frame rate the rover will be operating at. Default 30')
    parser.add_argument(
        '--show_video_feed',
        action='store_true',
        help="Pass this flag to see the rover's video feed.")
    parser.add_argument(
        '--save_training_data',
        action='store_true',
        help='Pass this flag to save training data when not autonomous.')
parser.add_argument(
'--ml_framework',
type=str,
default='tf',
help='tf for TensorFlow model, pt for PyTorch model. Default tf')
parser.add_argument(
'--image_type',
type=str,
default='color',
help='grayscale, color, or framestack for model input if autonomous. Default is color.')
parser.add_argument(
'--norm_method',
type=str,
default=None,
help='Type instance_norm or channel_norm. Default None')
parser.add_argument(
'--norm_vals',
type=str,
default='0,0,0',
help='values to use in normalization if norm_method is not None.')
parser.add_argument(
'--num_outputs',
type=int,
default=4,
help='The number of outputs for the network. Default 4.')
args = parser.parse_args()
norm_vals = [int(item) for item in args.norm_vals.split(',')]
    if args.save_training_data and args.autonomous:
        args.save_training_data = False
rover = RoverRun(args.model_name,
args.network,
args.autonomous,
args.driver,
args.rover,
args.frames_per_second,
args.show_video_feed,
args.save_training_data,
args.ml_framework,
args.image_type,
args.norm_method,
norm_vals,
args.num_outputs)
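# Hedged invocation sketches (illustration only; paths and names are placeholders):
#
#   human driving while collecting training data:
#     python main.py --save_training_data --driver alice --rover rover1
#   autonomous driving with a saved TensorFlow model:
#     python main.py --autonomous --model_name models/alex.tfl --network AlexNet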
| 2,908 | 26.971154 | 96 |
py
|
DNN_Rover
|
DNN_Rover-master/NetworkSwitch.py
|
import os, sys
import tflearn
import tensorflow as tf
import h5py
import numpy as np
from sklearn.preprocessing import scale
from tflearn.layers.core import input_data, dropout, fully_connected, flatten
from tflearn.layers.conv import conv_2d, max_pool_2d, highway_conv_2d, avg_pool_2d
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn import residual_bottleneck, activation, global_avg_pool, merge
def x3(x):
return 0.3*x**3
def whiten(X):
'''Function to ZCA whiten image matrix.'''
sigma = np.cov(X, rowvar=True) # [M x M]
# Singular Value Decomposition. X = U * np.diag(S) * V
U,S,V = np.linalg.svd(sigma)
# U: [M x M] eigenvectors of sigma.
# S: [M x 1] eigenvalues of sigma.
# V: [M x M] transpose of U
# Whitening constant: prevents division by zero
epsilon = 1e-5
# ZCA Whitening matrix: U * Lambda * U'
ZCAMatrix = np.dot(U, np.dot(np.diag(1.0/np.sqrt(S + epsilon)), U.T)) # [M x M]
return np.dot(ZCAMatrix, X)
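# Hedged sanity sketch (illustration only): with more samples than dimensions,
# the row covariance of the whitened output is approximately the identity:
#
#   Z = whiten(np.random.randn(8, 100))
#   assert np.allclose(np.cov(Z, rowvar=True), np.eye(8), atol=1e-3)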
########################################################
def DNN1(network, num_out, drop_prob=1.0):
network = tflearn.fully_connected(network, 64, activation='tanh',regularizer='L2', weight_decay=0.001)
network = tflearn.dropout(network, drop_prob)
network = tflearn.fully_connected(network, 64, activation='tanh', regularizer='L2', weight_decay=0.001)
network = tflearn.dropout(network, drop_prob)
network = tflearn.fully_connected(network, 64, activation='tanh', regularizer='L2', weight_decay=0.001)
network = tflearn.dropout(network, drop_prob)
network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
########################################################
def Conv1(network, num_out, drop_prob=1.0):
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, drop_prob)
network = fully_connected(network, 256, activation='tanh')
network = dropout(network, drop_prob)
network = fully_connected(network, num_out, activation='softmax')
return network
########################################################
def Alex1(network, num_out, drop_prob=1.0):
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, drop_prob)
network = fully_connected(network, 4096, activation='tanh')
network = dropout(network, drop_prob)
network = fully_connected(network, num_out, activation='softmax')
return network
########################################################
def VGG1(network, num_out, drop_prob=1.0):
network = conv_2d(network, 45, 3, activation='relu')
network = conv_2d(network, 45, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 120, 3, activation='relu')
network = conv_2d(network, 120, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 200, 3, activation='relu')
network = conv_2d(network, 200, 3, activation='relu')
network = conv_2d(network, 200, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 450, 3, activation='relu')
network = conv_2d(network, 450, 3, activation='relu')
network = conv_2d(network, 450, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)
network = conv_2d(network, 450, 3, activation='relu')
network = conv_2d(network, 450, 3, activation='relu')
network = conv_2d(network, 450, 3, activation='relu')
network = max_pool_2d(network, 2, strides=2)
network = fully_connected(network, 3500, activation='relu')
network = dropout(network, drop_prob)
network = fully_connected(network, 3500, activation='relu')
network = dropout(network, drop_prob)
network = fully_connected(network, num_out, activation='softmax')
return network
########################################################
def Highway1(network, num_out, drop_prob=1.0):
dense1 = tflearn.fully_connected(network, 64, activation='elu', regularizer='L2', weight_decay=0.001)
highway = dense1
for i in range(10):
highway = tflearn.highway(highway, 64, activation='elu',regularizer='L2', weight_decay=0.001, transform_dropout=0.7)
network = tflearn.fully_connected(highway, num_out, activation='softmax')
return network
########################################################
def ConvHighway1(network, num_out, drop_prob=1.0):
for i in range(3):
for j in [3, 2, 1]:
network = highway_conv_2d(network, 16, j, activation='elu')
network = max_pool_2d(network, 2)
network = batch_normalization(network)
network = fully_connected(network, 128, activation='elu')
network = fully_connected(network, 256, activation='elu')
network = fully_connected(network, num_out, activation='softmax')
return network
########################################################
def Net_in_Net1(network, num_out, drop_prob=1.0):
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 160, 1, activation='relu')
network = conv_2d(network, 96, 1, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = dropout(network, drop_prob)
network = conv_2d(network, 192, 5, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = avg_pool_2d(network, 3, strides=2)
network = dropout(network, drop_prob)
network = conv_2d(network, 192, 3, activation='relu')
network = conv_2d(network, 192, 1, activation='relu')
network = conv_2d(network, 10, 1, activation='relu')
network = avg_pool_2d(network, 8)
network = flatten(network)
network = fully_connected(network, num_out, activation='softmax')
return network
########################################################
def ResNet26(network, num_out, drop_prob=1.0):
n = 2 # number of residual blocks per layer
network = tflearn.conv_2d(network, 32, 7, regularizer='L2', strides=2, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = tflearn.residual_block(network, n, 32, activation='relu')
network = tflearn.residual_block(network, n, 32, activation='relu')
network = tflearn.residual_block(network, n, 64, downsample=True, activation='relu')
network = tflearn.residual_block(network, n, 64, activation='relu')
network = tflearn.residual_block(network, n, 128, activation='relu')
network = tflearn.residual_block(network, n, 128, activation='relu')
network = batch_normalization(network)
network = activation(network, 'relu')
network = global_avg_pool(network)
network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
########################################################
def ResNeXt(network, num_out=4, drop_prob=1.0):
c = 32 # cardinality of each residual block
network = tflearn.conv_2d(network, 64, 7, regularizer='L2', strides=2, activation='linear')
network = max_pool_2d(network, 3, strides=2)
network = batch_normalization(network)
network = activation(network, 'relu')
    # resnext_block0 is undefined in this repo; tflearn's built-in
    # resnext_block is assumed here as the intended building block
    network = tflearn.resnext_block(network, 2, 128, c, downsample=True)
    network = tflearn.resnext_block(network, 2, 256, c, downsample=True)
    network = tflearn.resnext_block(network, 2, 512, c, downsample=True)
    network = tflearn.resnext_block(network, 2, 1024, c)
network = global_avg_pool(network)
    network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
########################################################
def LSTM1(network, num_out=4, drop_prob=1.0):
network = tflearn.lstm(network, 500, return_seq=True)
network = tflearn.lstm(network, 500)
    network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
########################################################
def GoogLeNet1(network, num_out, drop_prob):
conv1_7_7 = conv_2d(network, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2')
pool1_3_3 = max_pool_2d(conv1_7_7, 3,strides=2)
pool1_3_3 = local_response_normalization(pool1_3_3)
conv2_3_3_reduce = conv_2d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
conv2_3_3 = conv_2d(conv2_3_3_reduce, 192,3, activation='relu', name='conv2_3_3')
conv2_3_3 = local_response_normalization(conv2_3_3)
pool2_3_3 = max_pool_2d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')
inception_3a_1_1 = conv_2d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
inception_3a_3_3_reduce = conv_2d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
inception_3a_3_3 = conv_2d(inception_3a_3_3_reduce, 128,filter_size=3, activation='relu', name = 'inception_3a_3_3')
inception_3a_5_5_reduce = conv_2d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
inception_3a_5_5 = conv_2d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5')
inception_3a_pool = max_pool_2d(pool2_3_3, kernel_size=3, strides=1, )
inception_3a_pool_1_1 = conv_2d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')
# merge the inception_3a__
inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=3)
inception_3b_1_1 = conv_2d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
inception_3b_3_3_reduce = conv_2d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
inception_3b_3_3 = conv_2d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu',name='inception_3b_3_3')
inception_3b_5_5_reduce = conv_2d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
inception_3b_5_5 = conv_2d(inception_3b_5_5_reduce, 96, filter_size=5, name = 'inception_3b_5_5')
inception_3b_pool = max_pool_2d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
inception_3b_pool_1_1 = conv_2d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')
#merge the inception_3b_*
inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=3,name='inception_3b_output')
pool3_3_3 = max_pool_2d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
inception_4a_1_1 = conv_2d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
inception_4a_3_3_reduce = conv_2d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
inception_4a_3_3 = conv_2d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
inception_4a_5_5_reduce = conv_2d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
inception_4a_5_5 = conv_2d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
inception_4a_pool = max_pool_2d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
inception_4a_pool_1_1 = conv_2d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')
inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=3, name='inception_4a_output')
    inception_4b_1_1 = conv_2d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
inception_4b_3_3_reduce = conv_2d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
inception_4b_3_3 = conv_2d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
inception_4b_5_5_reduce = conv_2d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
inception_4b_5_5 = conv_2d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
inception_4b_pool = max_pool_2d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
inception_4b_pool_1_1 = conv_2d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')
inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=3, name='inception_4b_output')
inception_4c_1_1 = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
inception_4c_3_3_reduce = conv_2d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
inception_4c_3_3 = conv_2d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
inception_4c_5_5_reduce = conv_2d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
inception_4c_5_5 = conv_2d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
inception_4c_pool = max_pool_2d(inception_4b_output, kernel_size=3, strides=1)
inception_4c_pool_1_1 = conv_2d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')
inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=3,name='inception_4c_output')
inception_4d_1_1 = conv_2d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
inception_4d_3_3_reduce = conv_2d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
inception_4d_3_3 = conv_2d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
inception_4d_5_5_reduce = conv_2d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
inception_4d_5_5 = conv_2d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
inception_4d_pool = max_pool_2d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
inception_4d_pool_1_1 = conv_2d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')
inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=3, name='inception_4d_output')
inception_4e_1_1 = conv_2d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
inception_4e_3_3_reduce = conv_2d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
inception_4e_3_3 = conv_2d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
inception_4e_5_5_reduce = conv_2d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
inception_4e_5_5 = conv_2d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
inception_4e_pool = max_pool_2d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
inception_4e_pool_1_1 = conv_2d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')
inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=3, mode='concat')
pool4_3_3 = max_pool_2d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')
inception_5a_1_1 = conv_2d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
inception_5a_3_3_reduce = conv_2d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
inception_5a_3_3 = conv_2d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
inception_5a_5_5_reduce = conv_2d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
inception_5a_5_5 = conv_2d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
inception_5a_pool = max_pool_2d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
inception_5a_pool_1_1 = conv_2d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')
inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=3,mode='concat')
inception_5b_1_1 = conv_2d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
inception_5b_3_3_reduce = conv_2d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
inception_5b_3_3 = conv_2d(inception_5b_3_3_reduce, 384, filter_size=3,activation='relu', name='inception_5b_3_3')
inception_5b_5_5_reduce = conv_2d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
inception_5b_5_5 = conv_2d(inception_5b_5_5_reduce,128, filter_size=5, activation='relu', name='inception_5b_5_5' )
inception_5b_pool = max_pool_2d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
inception_5b_pool_1_1 = conv_2d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=3, mode='concat')
pool5_7_7 = global_avg_pool(inception_5b_output)
    pool5_7_7 = dropout(pool5_7_7, drop_prob)
network = fully_connected(pool5_7_7, num_out, activation='softmax')
return network
########################################################
def DenseNet(network, num_out=4, drop_prob=1.0):
# Growth Rate (12, 16, 32, ...)
k = 3
# Depth (40, 100, ...)
L = 28
nb_layers = int((L - 4) / 3)
    # Building DenseNet Network ('denseblock' is undefined in this repo;
    # tflearn's built-in densenet_block is assumed here)
    network = tflearn.conv_2d(network, 10, 4, regularizer='L2', weight_decay=0.0001)
    network = tflearn.densenet_block(network, nb_layers, k)
    network = tflearn.densenet_block(network, nb_layers, k)
    network = tflearn.densenet_block(network, nb_layers, k)
network = tflearn.global_avg_pool(network)
# Regression
    network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
########################################################
def RCNN1(network, num_out=4, drop_prob=1.0, prev_activation=None, scale=False):
    if prev_activation is None:
        prev_activation = tf.zeros([1, 2500])
    if scale is True:
        # infer the input dimensions from the placeholder's static shape
        num_rows, num_cols, num_channels = network.get_shape().as_list()[1:]
        network = tf.transpose(tf.reshape(network, [-1, num_rows*num_cols*num_channels]))
        mean, var = tf.nn.moments(network, [0])
        network = tf.transpose((network-mean)/(tf.sqrt(var)+1e-6))
        network = tf.reshape(network, [-1, num_rows, num_cols, num_channels])
network = conv_2d(network, 96, 11, strides=4, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 256, 5, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 384, 3, activation='relu')
network = conv_2d(network, 256, 3, activation='relu')
network = max_pool_2d(network, 3, strides=2)
network = local_response_normalization(network)
network = fully_connected(network, 2500, activation='tanh')
network = dropout(network, drop_prob)
feat_layer = fully_connected(network, 2500, activation='tanh')
network = merge([feat_layer, prev_activation], 'concat', axis=1)
    network = tflearn.lstm(network, 250, dropout=drop_prob, activation='relu')
    network = tflearn.fully_connected(network, num_out, activation='softmax')
return network, feat_layer
########################################################
def lstm2(network, num_out=4, drop_prob=1.0):
    network = tflearn.lstm(network, 10000, dropout=0.7, activation='relu')
    network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
########################################################
def X3(y, iters, batch_sz, num_dict_features=None, D=None):
''' Dynamical systems neural network used for sparse approximation of an
input vector.
Args:
y: input signal or vector, or multiple column vectors.
num_dict_features: number of dictionary patches to learn.
iters: number of LCA iterations.
batch_sz: number of samples to send to the network at each iteration.
D: The dictionary to be used in the network.'''
assert(num_dict_features is None or D is None), 'provide D or num_dict_features, not both'
e = np.zeros([iters, 1])
    if D is None:
        D = np.random.randn(y.shape[0], num_dict_features)
for i in range(iters):
# choose random examples this iteration
batch=y[:, np.random.randint(0, y.shape[1], batch_sz)]
batch = scale(batch, 1)
batch = whiten(batch)
# scale the values in the dict to between 0 and 1
D=np.matmul(D, np.diag(1/(np.sqrt(np.sum(D**2, 0))+1e-6)))
# get similarity between each feature and each data patch
a = np.matmul(D.transpose(), batch)
# scale the alpha coefficients (cosine similarity coefficients)
a=np.matmul(a, np.diag(1/(np.sqrt(np.sum(a**2, 0))+1e-6)))
# perform cubic activation on the alphas
a=0.5*a**3
# get the SSE between reconstruction and data batch
error = batch - np.matmul(D, a)
# save the error to plot later
e[i, 0] = np.mean(error**2)
# modify the dictionary to reduce the error
D=D+np.matmul(error, a.transpose())
return D, a, e
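# Hedged usage sketch (illustration only): learn 32 dictionary atoms from
# 64-dimensional signals; `a` holds the sparse codes for the final batch and
# `e` the per-iteration reconstruction error.
#
#   Ysig = np.random.randn(64, 1000)
#   D, a, e = X3(Ysig, iters=50, batch_sz=100, num_dict_features=32)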
######################################################################
def ResNet34(network, num_out=4, drop_prob=1.0):
network = tflearn.conv_2d(network, 64, 7,
strides=2, activation='linear',
regularizer='L2')
network = max_pool_2d(network, 3, strides=2)
network = tflearn.residual_block(network, 3, 64, activation='relu')
network = tflearn.residual_block(network, 1, 128, activation='relu', downsample=True)
network = tflearn.residual_block(network, 3, 128, activation='relu')
network = tflearn.residual_block(network, 1, 256, activation='relu', downsample=True)
network = tflearn.residual_block(network, 5, 256, activation='relu')
network = tflearn.residual_block(network, 1, 512, activation='relu', downsample=True)
network = tflearn.residual_block(network, 2, 512, activation='relu')
network = batch_normalization(network)
network = activation(network, 'relu')
network = global_avg_pool(network)
    network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
#######################################################################
def ResNeXt34(network, num_out=4, drop_prob=1.0):
c = 8 #cardinality
network = tflearn.conv_2d(network, 32, 7, strides=2, activation='linear')
network = max_pool_2d(network, 3, strides=2)
network = batch_normalization(network)
network = activation(network, 'relu')
    # resnext_block5 is undefined in this repo; tflearn's built-in
    # resnext_block is assumed here as the intended building block
    network = tflearn.resnext_block(network, 3, 32, c)
    network = tflearn.resnext_block(network, 1, 64, c, downsample=True)
    network = tflearn.resnext_block(network, 3, 64, c)
    network = tflearn.resnext_block(network, 1, 128, c, downsample=True)
    network = tflearn.resnext_block(network, 5, 128, c)
    network = tflearn.resnext_block(network, 1, 256, c, downsample=True)
    network = tflearn.resnext_block(network, 2, 256, c)
network = global_avg_pool(network)
    network = tflearn.fully_connected(network, num_out, activation='softmax')
return network
##########################################################################
##########################################################################
##########################################################################
modelswitch = {
'FullyConnected' : DNN1,
'CNN' : Conv1,
'AlexNet' : Alex1,
'VGG' : VGG1,
'Highway' : Highway1,
'CNNHighway' : ConvHighway1,
'NetinNet' : Net_in_Net1,
'ResNet26' : ResNet26,
'ResNeXt26' : ResNeXt,
'InceptionV3' : GoogLeNet1,
'LSTM' : LSTM1,
'DenseNet' : DenseNet,
'RCNN' : RCNN1,
'LSTM2' : lstm2,
'X3' : X3,
'ResNet34' : ResNet34,
'ResNeXt34' : ResNeXt34,
}
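# Hedged dispatch sketch (illustration only): modelswitch maps each CLI
# --network_name string to a builder; the convolutional builders share the
# signature (input_tensor, num_out, drop_prob), e.g.:
#
#   build = modelswitch['AlexNet']
#   logits = build(input_placeholder, 4, 0.5)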
| 25,531 | 46.369202 | 161 |
py
|
DNN_Rover
|
DNN_Rover-master/RoverAPI.py
|
from __future__ import print_function
from rover.Data import *
import os, sys
from rover.Pygame_UI import *
from rover import Rover
import time
import numpy as np
#from scipy.misc import imresize
class RoverRun(Rover):
def __init__(self, fileName, network_name, autonomous, driver, rover, FPS,
view, save_data, framework, image_type, normalization,
norm_vals, num_out):
Rover.__init__(self)
self.FPS = FPS
self.view = view
self.speed = 0.5
self.save_data = save_data
self.userInterface = Pygame_UI(self.FPS, self.speed)
self.image = None
self.quit = False
self.angle = 0
self.autonomous = autonomous
self.image_type = image_type
self.im_shp = None
self.act = self.userInterface.action_dict['q']
if self.autonomous is True:
if self.image_type in ['color', 'Color']:
self.im_shp = [None, 130, 320, 3]
elif self.image_type in ['framestack', 'Framestack']:
self.im_shp = [None, 130, 320, 3]
self.framestack = np.zeros([1, 130, 320, self.FPS])
self.stack = [0, 5, 15]
elif self.image_type in ['grayscale', 'gray', 'Grayscale']:
self.im_shp = [None, 130, 320, 1]
self.d = Data(driver, rover, save_data, framework, fileName,
network_name, self.im_shp, normalization, norm_vals,
num_out, self.image_type)
if self.autonomous is True:
self.d.load_network()
self.run()
def run(self):
        while self.image is None:
pass
while not self.quit:
s = self.image
if self.view is True:
self.userInterface.show_feed(s)
key = self.userInterface.getActiveKey()
if key == 'z':
self.quit = True
if self.autonomous is not True:
if key in ['w', 'a', 's', 'd', 'q', ' ']:
self.act = self.userInterface.action_dict[key]
else:
continue
if self.act[-1] != 9 and self.save_data is True:
self.d.add_data(s, self.act[-1])
else:
s = self.d.normalize(s)
self.angle = self.d.predict(s)
self.act = self.userInterface.action_dict[self.angle]
self.set_wheel_treads(self.act[0], self.act[1])
self.userInterface.manage_UI()
# cleanup and stop vehicle
self.set_wheel_treads(0, 0)
self.userInterface.cleanup()
# save training data and close
self.d.save()
self.close()
| 2,802 | 31.593023 | 78 |
py
|
DNN_Rover
|
DNN_Rover-master/tobii_interface.py.py
|
import tobii_research as tr
import time
import cv2
import numpy as np
from numpy.random import randint
import csv
class Tobii:
def __init__(self):
self.cal_points = 5
self.ht, self.wd = 1024, 1280 # height and width of the display
        self.point_size = 10 // 2  # half-width in pixels of a displayed calibration point
self.r = list(randint(self.point_size, self.ht-self.point_size, self.cal_points))
self.c = list(randint(self.point_size, self.wd-self.point_size, self.cal_points))
self.res = 'calibration_status_failure'
self.points = 0
# find the eyetracker
self.tracker = tr.find_all_eyetrackers()[0]
while self.res != 'calibration_status_success' or self.points != self.cal_points:
self.res, self.points = self.calibrate()
def calibrate(self):
# instantiate calibration object
self.cal = tr.ScreenBasedCalibration(self.tracker)
self.cal.enter_calibration_mode() # enter calibration mode
for row, col in list(zip(self.r, self.c)):
img = np.zeros([self.ht, self.wd]) # initialize images with zeros
img[row-self.point_size:row+self.point_size,
col-self.point_size:col+self.point_size] = 255.
row, col = float(row) / self.ht, float(col) / self.wd # normalize the points for calibration
cv2.namedWindow('test', cv2.WINDOW_NORMAL)
cv2.imshow('test', img)
cv2.waitKey(800)
#if cal.collect_data(col, row) != tr.CALIBRATION_STATUS_SUCCESS:
self.cal.collect_data(col, row)
result = self.cal.compute_and_apply()
print "Compute and apply returned {0} and collected at {1} points.".\
format(result.status, len(result.calibration_points))
self.cal.leave_calibration_mode()
cv2.destroyAllWindows()
return result.status, len(result.calibration_points)
def gaze_data_callback(gaze_data):
global global_gaze_data
global_gaze_data.append([gaze_data['left_gaze_point_on_display_area'],
gaze_data['right_gaze_point_on_display_area']])
# tobii = Tobii()
# global global_gaze_data
# global_gaze_data = []
# tobii.tracker.subscribe_to(tr.EYETRACKER_GAZE_DATA,
# gaze_data_callback,
# as_dictionary=True)
#
# time.sleep(3)
#
#
# tobii.tracker.unsubscribe_from(tr.EYETRACKER_GAZE_DATA, gaze_data_callback)
| 2,462 | 33.690141 | 104 |
py
|
DNN_Rover
|
DNN_Rover-master/rover/adpcm.py
|
_indexAdjust = [-1, -1, -1, -1, 2, 4, 6, 8]
_stepTable = [
7,
8,
9,
10,
11,
12,
13,
14,
16,
17,
19,
21,
23,
25,
28,
31,
34,
37,
41,
45,
50,
55,
60,
66,
73,
80,
88,
97,
107,
118,
130,
143,
157,
173,
190,
209,
230,
253,
279,
307,
337,
371,
408,
449,
494,
544,
598,
658,
724,
796,
876,
963,
1060,
1166,
1282,
1411,
1552,
1707,
1878,
2066,
2272,
2499,
2749,
3024,
3327,
3660,
4026,
4428,
4871,
5358,
5894,
6484,
7132,
7845,
8630,
9493,
10442,
11487,
12635,
13899,
15289,
16818,
18500,
20350,
22385,
24623,
27086,
29794,
32767]
def _constrain(val, minval, maxval):
return min(max(val, minval), maxval)
def decodeADPCMToPCM(raw, pre_sample, index):
''' Returns ordinary PCM samples in interval +/- 2^15, decoded from ADPCM samples
'''
decoded = []
for i in range(len(raw) << 1):
b = ord(raw[i >> 1])
code = 0xF & b if i & 1 else b >> 4
sb = 1 if code & 0x08 else 0
code &= 0x07
        delta = (_stepTable[index] * code) // 4 + _stepTable[index] // 8
        if sb:
            delta = -delta
        pre_sample += delta
        pre_sample = _constrain(pre_sample, -32768, 32767)
        decoded.append(pre_sample)
        index += _indexAdjust[code]
        index = _constrain(index, 0, 88)
return decoded
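if __name__ == '__main__':
    # Hedged demo (illustration only): decode two ADPCM bytes -- i.e. four
    # 4-bit samples -- starting from a zero predictor and step index 0.
    print(decodeADPCMToPCM('\x17\x9a', 0, 0))  # four PCM values in [-32768, 32767]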
| 1,671 | 11.571429 | 85 |
py
|
DNN_Rover
|
DNN_Rover-master/rover/blowfish.py
|
import ctypes
class Blowfish:
def __init__(self, key):
        '''Uses the specified key to create a Blowfish object that you can
        then use to encrypt and decrypt pairs of numbers. The P-array is
        initialized with values derived from the hexadecimal digits of Pi.
        '''
# from https://www.schneier.com/code/bfsh-koc.zip
ORIG_P = \
[0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,
0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917,
0x9216D5D9, 0x8979FB1B]
self._keygen(key, ORIG_P)
def encrypt(self, L, R):
'''Accepts a pair of numbers and returns them in encrypted form.
'''
for i in range(0, 16, 2):
L ^= self.P[i]
R ^= self._f(L)
R ^= self.P[i + 1]
L ^= self._f(R)
L ^= self.P[16]
R ^= self.P[17]
return (R, L)
def decrypt(self, L, R):
'''Accepts an encrypted pair of numbers and returns them in unencrypted
form.
'''
for i in range(16, 0, -2):
L ^= self.P[i + 1]
R ^= self._f(L)
R ^= self.P[i]
L ^= self._f(R)
L ^= self.P[1]
R ^= self.P[0]
return (R, L)
def _keygen(self, key, ORIG_P):
# S-boxes
# from https://www.schneier.com/code/bfsh-koc.zip
self.S = \
[
[0xD1310BA6, 0x98DFB5AC, 0x2FFD72DB, 0xD01ADFB7,
0xB8E1AFED, 0x6A267E96, 0xBA7C9045, 0xF12C7F99,
0x24A19947, 0xB3916CF7, 0x0801F2E2, 0x858EFC16,
0x636920D8, 0x71574E69, 0xA458FEA3, 0xF4933D7E,
0x0D95748F, 0x728EB658, 0x718BCD58, 0x82154AEE,
0x7B54A41D, 0xC25A59B5, 0x9C30D539, 0x2AF26013,
0xC5D1B023, 0x286085F0, 0xCA417918, 0xB8DB38EF,
0x8E79DCB0, 0x603A180E, 0x6C9E0E8B, 0xB01E8A3E,
0xD71577C1, 0xBD314B27, 0x78AF2FDA, 0x55605C60,
0xE65525F3, 0xAA55AB94, 0x57489862, 0x63E81440,
0x55CA396A, 0x2AAB10B6, 0xB4CC5C34, 0x1141E8CE,
0xA15486AF, 0x7C72E993, 0xB3EE1411, 0x636FBC2A,
0x2BA9C55D, 0x741831F6, 0xCE5C3E16, 0x9B87931E,
0xAFD6BA33, 0x6C24CF5C, 0x7A325381, 0x28958677,
0x3B8F4898, 0x6B4BB9AF, 0xC4BFE81B, 0x66282193,
0x61D809CC, 0xFB21A991, 0x487CAC60, 0x5DEC8032,
0xEF845D5D, 0xE98575B1, 0xDC262302, 0xEB651B88,
0x23893E81, 0xD396ACC5, 0x0F6D6FF3, 0x83F44239,
0x2E0B4482, 0xA4842004, 0x69C8F04A, 0x9E1F9B5E,
0x21C66842, 0xF6E96C9A, 0x670C9C61, 0xABD388F0,
0x6A51A0D2, 0xD8542F68, 0x960FA728, 0xAB5133A3,
0x6EEF0B6C, 0x137A3BE4, 0xBA3BF050, 0x7EFB2A98,
0xA1F1651D, 0x39AF0176, 0x66CA593E, 0x82430E88,
0x8CEE8619, 0x456F9FB4, 0x7D84A5C3, 0x3B8B5EBE,
0xE06F75D8, 0x85C12073, 0x401A449F, 0x56C16AA6,
0x4ED3AA62, 0x363F7706, 0x1BFEDF72, 0x429B023D,
0x37D0D724, 0xD00A1248, 0xDB0FEAD3, 0x49F1C09B,
0x075372C9, 0x80991B7B, 0x25D479D8, 0xF6E8DEF7,
0xE3FE501A, 0xB6794C3B, 0x976CE0BD, 0x04C006BA,
0xC1A94FB6, 0x409F60C4, 0x5E5C9EC2, 0x196A2463,
0x68FB6FAF, 0x3E6C53B5, 0x1339B2EB, 0x3B52EC6F,
0x6DFC511F, 0x9B30952C, 0xCC814544, 0xAF5EBD09,
0xBEE3D004, 0xDE334AFD, 0x660F2807, 0x192E4BB3,
0xC0CBA857, 0x45C8740F, 0xD20B5F39, 0xB9D3FBDB,
0x5579C0BD, 0x1A60320A, 0xD6A100C6, 0x402C7279,
0x679F25FE, 0xFB1FA3CC, 0x8EA5E9F8, 0xDB3222F8,
0x3C7516DF, 0xFD616B15, 0x2F501EC8, 0xAD0552AB,
0x323DB5FA, 0xFD238760, 0x53317B48, 0x3E00DF82,
0x9E5C57BB, 0xCA6F8CA0, 0x1A87562E, 0xDF1769DB,
0xD542A8F6, 0x287EFFC3, 0xAC6732C6, 0x8C4F5573,
0x695B27B0, 0xBBCA58C8, 0xE1FFA35D, 0xB8F011A0,
0x10FA3D98, 0xFD2183B8, 0x4AFCB56C, 0x2DD1D35B,
0x9A53E479, 0xB6F84565, 0xD28E49BC, 0x4BFB9790,
0xE1DDF2DA, 0xA4CB7E33, 0x62FB1341, 0xCEE4C6E8,
0xEF20CADA, 0x36774C01, 0xD07E9EFE, 0x2BF11FB4,
0x95DBDA4D, 0xAE909198, 0xEAAD8E71, 0x6B93D5A0,
0xD08ED1D0, 0xAFC725E0, 0x8E3C5B2F, 0x8E7594B7,
0x8FF6E2FB, 0xF2122B64, 0x8888B812, 0x900DF01C,
0x4FAD5EA0, 0x688FC31C, 0xD1CFF191, 0xB3A8C1AD,
0x2F2F2218, 0xBE0E1777, 0xEA752DFE, 0x8B021FA1,
0xE5A0CC0F, 0xB56F74E8, 0x18ACF3D6, 0xCE89E299,
0xB4A84FE0, 0xFD13E0B7, 0x7CC43B81, 0xD2ADA8D9,
0x165FA266, 0x80957705, 0x93CC7314, 0x211A1477,
0xE6AD2065, 0x77B5FA86, 0xC75442F5, 0xFB9D35CF,
0xEBCDAF0C, 0x7B3E89A0, 0xD6411BD3, 0xAE1E7E49,
0x00250E2D, 0x2071B35E, 0x226800BB, 0x57B8E0AF,
0x2464369B, 0xF009B91E, 0x5563911D, 0x56DFA6AA,
0x78C14389, 0xD95A537F, 0x207D5BA2, 0x02E5B9C5,
0x83260376, 0x6295CFA9, 0x11C81968, 0x4E734A41,
0xB3472DCA, 0x7B14A94A, 0x1B510052, 0x9A532915,
0xD60F573F, 0xBC9BC6E4, 0x2B60A476, 0x81E67400,
0x08BA6FB5, 0x571BE91F, 0xF296EC6B, 0x2A0DD915,
0xB6636521, 0xE7B9F9B6, 0xFF340528, 0xC5855664,
0x53B02D5D, 0xA99F8FA1, 0x08BA4799, 0x6E85076A],
[0x4B7A70E9, 0xB5B32944, 0xDB75092E, 0xC4192623,
0xAD6EA6B0, 0x49A7DF7D, 0x9CEE60B8, 0x8FEDB266,
0xECAA8C71, 0x699A17FF, 0x5664526C, 0xC2B19EE1,
0x193602A5, 0x75094C29, 0xA0591340, 0xE4183A3E,
0x3F54989A, 0x5B429D65, 0x6B8FE4D6, 0x99F73FD6,
0xA1D29C07, 0xEFE830F5, 0x4D2D38E6, 0xF0255DC1,
0x4CDD2086, 0x8470EB26, 0x6382E9C6, 0x021ECC5E,
0x09686B3F, 0x3EBAEFC9, 0x3C971814, 0x6B6A70A1,
0x687F3584, 0x52A0E286, 0xB79C5305, 0xAA500737,
0x3E07841C, 0x7FDEAE5C, 0x8E7D44EC, 0x5716F2B8,
0xB03ADA37, 0xF0500C0D, 0xF01C1F04, 0x0200B3FF,
0xAE0CF51A, 0x3CB574B2, 0x25837A58, 0xDC0921BD,
0xD19113F9, 0x7CA92FF6, 0x94324773, 0x22F54701,
0x3AE5E581, 0x37C2DADC, 0xC8B57634, 0x9AF3DDA7,
0xA9446146, 0x0FD0030E, 0xECC8C73E, 0xA4751E41,
0xE238CD99, 0x3BEA0E2F, 0x3280BBA1, 0x183EB331,
0x4E548B38, 0x4F6DB908, 0x6F420D03, 0xF60A04BF,
0x2CB81290, 0x24977C79, 0x5679B072, 0xBCAF89AF,
0xDE9A771F, 0xD9930810, 0xB38BAE12, 0xDCCF3F2E,
0x5512721F, 0x2E6B7124, 0x501ADDE6, 0x9F84CD87,
0x7A584718, 0x7408DA17, 0xBC9F9ABC, 0xE94B7D8C,
0xEC7AEC3A, 0xDB851DFA, 0x63094366, 0xC464C3D2,
0xEF1C1847, 0x3215D908, 0xDD433B37, 0x24C2BA16,
0x12A14D43, 0x2A65C451, 0x50940002, 0x133AE4DD,
0x71DFF89E, 0x10314E55, 0x81AC77D6, 0x5F11199B,
0x043556F1, 0xD7A3C76B, 0x3C11183B, 0x5924A509,
0xF28FE6ED, 0x97F1FBFA, 0x9EBABF2C, 0x1E153C6E,
0x86E34570, 0xEAE96FB1, 0x860E5E0A, 0x5A3E2AB3,
0x771FE71C, 0x4E3D06FA, 0x2965DCB9, 0x99E71D0F,
0x803E89D6, 0x5266C825, 0x2E4CC978, 0x9C10B36A,
0xC6150EBA, 0x94E2EA78, 0xA5FC3C53, 0x1E0A2DF4,
0xF2F74EA7, 0x361D2B3D, 0x1939260F, 0x19C27960,
0x5223A708, 0xF71312B6, 0xEBADFE6E, 0xEAC31F66,
0xE3BC4595, 0xA67BC883, 0xB17F37D1, 0x018CFF28,
0xC332DDEF, 0xBE6C5AA5, 0x65582185, 0x68AB9802,
0xEECEA50F, 0xDB2F953B, 0x2AEF7DAD, 0x5B6E2F84,
0x1521B628, 0x29076170, 0xECDD4775, 0x619F1510,
0x13CCA830, 0xEB61BD96, 0x0334FE1E, 0xAA0363CF,
0xB5735C90, 0x4C70A239, 0xD59E9E0B, 0xCBAADE14,
0xEECC86BC, 0x60622CA7, 0x9CAB5CAB, 0xB2F3846E,
0x648B1EAF, 0x19BDF0CA, 0xA02369B9, 0x655ABB50,
0x40685A32, 0x3C2AB4B3, 0x319EE9D5, 0xC021B8F7,
0x9B540B19, 0x875FA099, 0x95F7997E, 0x623D7DA8,
0xF837889A, 0x97E32D77, 0x11ED935F, 0x16681281,
0x0E358829, 0xC7E61FD6, 0x96DEDFA1, 0x7858BA99,
0x57F584A5, 0x1B227263, 0x9B83C3FF, 0x1AC24696,
0xCDB30AEB, 0x532E3054, 0x8FD948E4, 0x6DBC3128,
0x58EBF2EF, 0x34C6FFEA, 0xFE28ED61, 0xEE7C3C73,
0x5D4A14D9, 0xE864B7E3, 0x42105D14, 0x203E13E0,
0x45EEE2B6, 0xA3AAABEA, 0xDB6C4F15, 0xFACB4FD0,
0xC742F442, 0xEF6ABBB5, 0x654F3B1D, 0x41CD2105,
0xD81E799E, 0x86854DC7, 0xE44B476A, 0x3D816250,
0xCF62A1F2, 0x5B8D2646, 0xFC8883A0, 0xC1C7B6A3,
0x7F1524C3, 0x69CB7492, 0x47848A0B, 0x5692B285,
0x095BBF00, 0xAD19489D, 0x1462B174, 0x23820E00,
0x58428D2A, 0x0C55F5EA, 0x1DADF43E, 0x233F7061,
0x3372F092, 0x8D937E41, 0xD65FECF1, 0x6C223BDB,
0x7CDE3759, 0xCBEE7460, 0x4085F2A7, 0xCE77326E,
0xA6078084, 0x19F8509E, 0xE8EFD855, 0x61D99735,
0xA969A7AA, 0xC50C06C2, 0x5A04ABFC, 0x800BCADC,
0x9E447A2E, 0xC3453484, 0xFDD56705, 0x0E1E9EC9,
0xDB73DBD3, 0x105588CD, 0x675FDA79, 0xE3674340,
0xC5C43465, 0x713E38D8, 0x3D28F89E, 0xF16DFF20,
0x153E21E7, 0x8FB03D4A, 0xE6E39F2B, 0xDB83ADF7],
[0xE93D5A68, 0x948140F7, 0xF64C261C, 0x94692934,
0x411520F7, 0x7602D4F7, 0xBCF46B2E, 0xD4A20068,
0xD4082471, 0x3320F46A, 0x43B7D4B7, 0x500061AF,
0x1E39F62E, 0x97244546, 0x14214F74, 0xBF8B8840,
0x4D95FC1D, 0x96B591AF, 0x70F4DDD3, 0x66A02F45,
0xBFBC09EC, 0x03BD9785, 0x7FAC6DD0, 0x31CB8504,
0x96EB27B3, 0x55FA3941, 0xDA2547E6, 0xABCA0A9A,
0x28507825, 0x530429F4, 0x0A2C86DA, 0xE9B66DFB,
0x68DC1462, 0xD7486900, 0x680EC0A4, 0x27A18DEE,
0x4F3FFEA2, 0xE887AD8C, 0xB58CE006, 0x7AF4D6B6,
0xAACE1E7C, 0xD3375FEC, 0xCE78A399, 0x406B2A42,
0x20FE9E35, 0xD9F385B9, 0xEE39D7AB, 0x3B124E8B,
0x1DC9FAF7, 0x4B6D1856, 0x26A36631, 0xEAE397B2,
0x3A6EFA74, 0xDD5B4332, 0x6841E7F7, 0xCA7820FB,
0xFB0AF54E, 0xD8FEB397, 0x454056AC, 0xBA489527,
0x55533A3A, 0x20838D87, 0xFE6BA9B7, 0xD096954B,
0x55A867BC, 0xA1159A58, 0xCCA92963, 0x99E1DB33,
0xA62A4A56, 0x3F3125F9, 0x5EF47E1C, 0x9029317C,
0xFDF8E802, 0x04272F70, 0x80BB155C, 0x05282CE3,
0x95C11548, 0xE4C66D22, 0x48C1133F, 0xC70F86DC,
0x07F9C9EE, 0x41041F0F, 0x404779A4, 0x5D886E17,
0x325F51EB, 0xD59BC0D1, 0xF2BCC18F, 0x41113564,
0x257B7834, 0x602A9C60, 0xDFF8E8A3, 0x1F636C1B,
0x0E12B4C2, 0x02E1329E, 0xAF664FD1, 0xCAD18115,
0x6B2395E0, 0x333E92E1, 0x3B240B62, 0xEEBEB922,
0x85B2A20E, 0xE6BA0D99, 0xDE720C8C, 0x2DA2F728,
0xD0127845, 0x95B794FD, 0x647D0862, 0xE7CCF5F0,
0x5449A36F, 0x877D48FA, 0xC39DFD27, 0xF33E8D1E,
0x0A476341, 0x992EFF74, 0x3A6F6EAB, 0xF4F8FD37,
0xA812DC60, 0xA1EBDDF8, 0x991BE14C, 0xDB6E6B0D,
0xC67B5510, 0x6D672C37, 0x2765D43B, 0xDCD0E804,
0xF1290DC7, 0xCC00FFA3, 0xB5390F92, 0x690FED0B,
0x667B9FFB, 0xCEDB7D9C, 0xA091CF0B, 0xD9155EA3,
0xBB132F88, 0x515BAD24, 0x7B9479BF, 0x763BD6EB,
0x37392EB3, 0xCC115979, 0x8026E297, 0xF42E312D,
0x6842ADA7, 0xC66A2B3B, 0x12754CCC, 0x782EF11C,
0x6A124237, 0xB79251E7, 0x06A1BBE6, 0x4BFB6350,
0x1A6B1018, 0x11CAEDFA, 0x3D25BDD8, 0xE2E1C3C9,
0x44421659, 0x0A121386, 0xD90CEC6E, 0xD5ABEA2A,
0x64AF674E, 0xDA86A85F, 0xBEBFE988, 0x64E4C3FE,
0x9DBC8057, 0xF0F7C086, 0x60787BF8, 0x6003604D,
0xD1FD8346, 0xF6381FB0, 0x7745AE04, 0xD736FCCC,
0x83426B33, 0xF01EAB71, 0xB0804187, 0x3C005E5F,
0x77A057BE, 0xBDE8AE24, 0x55464299, 0xBF582E61,
0x4E58F48F, 0xF2DDFDA2, 0xF474EF38, 0x8789BDC2,
0x5366F9C3, 0xC8B38E74, 0xB475F255, 0x46FCD9B9,
0x7AEB2661, 0x8B1DDF84, 0x846A0E79, 0x915F958E,
0x466E598E, 0x20B45770, 0x8CD55591, 0xC902DE4C,
0xB90BACE1, 0xBB8205D0, 0x11A86248, 0x7574A99E,
0xB77F19B6, 0xE0A9DC09, 0x662D09A1, 0xC4324633,
0xE85A1F02, 0x09F0BE8C, 0x4A99A025, 0x1D6EFE10,
0x1AB93D1D, 0x0BA5A4DF, 0xA186F20F, 0x2868F169,
0xDCB7DA83, 0x573906FE, 0xA1E2CE9B, 0x4FCD7F52,
0x50115E01, 0xA70683FA, 0xA002B5C4, 0x0DE6D027,
0x9AF88C27, 0x773F8641, 0xC3604C06, 0x61A806B5,
0xF0177A28, 0xC0F586E0, 0x006058AA, 0x30DC7D62,
0x11E69ED7, 0x2338EA63, 0x53C2DD94, 0xC2C21634,
0xBBCBEE56, 0x90BCB6DE, 0xEBFC7DA1, 0xCE591D76,
0x6F05E409, 0x4B7C0188, 0x39720A3D, 0x7C927C24,
0x86E3725F, 0x724D9DB9, 0x1AC15BB4, 0xD39EB8FC,
0xED545578, 0x08FCA5B5, 0xD83D7CD3, 0x4DAD0FC4,
0x1E50EF5E, 0xB161E6F8, 0xA28514D9, 0x6C51133C,
0x6FD5C7E7, 0x56E14EC4, 0x362ABFCE, 0xDDC6C837,
0xD79A3234, 0x92638212, 0x670EFA8E, 0x406000E0],
[0x3A39CE37, 0xD3FAF5CF, 0xABC27737, 0x5AC52D1B,
0x5CB0679E, 0x4FA33742, 0xD3822740, 0x99BC9BBE,
0xD5118E9D, 0xBF0F7315, 0xD62D1C7E, 0xC700C47B,
0xB78C1B6B, 0x21A19045, 0xB26EB1BE, 0x6A366EB4,
0x5748AB2F, 0xBC946E79, 0xC6A376D2, 0x6549C2C8,
0x530FF8EE, 0x468DDE7D, 0xD5730A1D, 0x42D04DC6,
0x2939BBDB, 0xA9BA4650, 0xAC9526E8, 0xBE5EE304,
0xA1FAD5F0, 0x6A2D519A, 0x63EF8CE2, 0x9A86EE22,
0xC089C2B8, 0x43242EF6, 0xA51E03AA, 0x9CF2D0A4,
0x83C061BA, 0x9BE96A4D, 0x8FE51550, 0xBA645BD6,
0x2826A2F9, 0xA73A3AE1, 0x4BA99586, 0xEF5562E9,
0xC72FEFD3, 0xF752F7DA, 0x3F046F69, 0x77FA0A59,
0x80E4A915, 0x87B08601, 0x9B09E6AD, 0x3B3EE593,
0xE990FD5A, 0x9E34D797, 0x2CF0B7D9, 0x022B8B51,
0x96D5AC3A, 0x017DA67D, 0xD1CF3ED6, 0x7C7D2D28,
0x1F9F25CF, 0xADF2B89B, 0x5AD6B472, 0x5A88F54C,
0xE029AC71, 0xE019A5E6, 0x47B0ACFD, 0xED93FA9B,
0xE8D3C48D, 0x283B57CC, 0xF8D56629, 0x79132E28,
0x785F0191, 0xED756055, 0xF7960E44, 0xE3D35E8C,
0x15056DD4, 0x88F46DBA, 0x03A16125, 0x0564F0BD,
0xC3EB9E15, 0x3C9057A2, 0x97271AEC, 0xA93A072A,
0x1B3F6D9B, 0x1E6321F5, 0xF59C66FB, 0x26DCF319,
0x7533D928, 0xB155FDF5, 0x03563482, 0x8ABA3CBB,
0x28517711, 0xC20AD9F8, 0xABCC5167, 0xCCAD925F,
0x4DE81751, 0x3830DC8E, 0x379D5862, 0x9320F991,
0xEA7A90C2, 0xFB3E7BCE, 0x5121CE64, 0x774FBE32,
0xA8B6E37E, 0xC3293D46, 0x48DE5369, 0x6413E680,
0xA2AE0810, 0xDD6DB224, 0x69852DFD, 0x09072166,
0xB39A460A, 0x6445C0DD, 0x586CDECF, 0x1C20C8AE,
0x5BBEF7DD, 0x1B588D40, 0xCCD2017F, 0x6BB4E3BB,
0xDDA26A7E, 0x3A59FF45, 0x3E350A44, 0xBCB4CDD5,
0x72EACEA8, 0xFA6484BB, 0x8D6612AE, 0xBF3C6F47,
0xD29BE463, 0x542F5D9E, 0xAEC2771B, 0xF64E6370,
0x740E0D8D, 0xE75B1357, 0xF8721671, 0xAF537D5D,
0x4040CB08, 0x4EB4E2CC, 0x34D2466A, 0x0115AF84,
0xE1B00428, 0x95983A1D, 0x06B89FB4, 0xCE6EA048,
0x6F3F3B82, 0x3520AB82, 0x011A1D4B, 0x277227F8,
0x611560B1, 0xE7933FDC, 0xBB3A792B, 0x344525BD,
0xA08839E1, 0x51CE794B, 0x2F32C9B7, 0xA01FBAC9,
0xE01CC87E, 0xBCC7D1F6, 0xCF0111C3, 0xA1E8AAC7,
0x1A908749, 0xD44FBD9A, 0xD0DADECB, 0xD50ADA38,
0x0339C32A, 0xC6913667, 0x8DF9317C, 0xE0B12B4F,
0xF79E59B7, 0x43F5BB3A, 0xF2D519FF, 0x27D9459C,
0xBF97222C, 0x15E6FC2A, 0x0F91FC71, 0x9B941525,
0xFAE59361, 0xCEB69CEB, 0xC2A86459, 0x12BAA8D1,
0xB6C1075E, 0xE3056A0C, 0x10D25065, 0xCB03A442,
0xE0EC6E0E, 0x1698DB3B, 0x4C98A0BE, 0x3278E964,
0x9F1F9532, 0xE0D392DF, 0xD3A0342B, 0x8971F21E,
0x1B0A7441, 0x4BA3348C, 0xC5BE7120, 0xC37632D8,
0xDF359F8D, 0x9B992F2E, 0xE60B6F47, 0x0FE3F11D,
0xE54CDA54, 0x1EDAD891, 0xCE6279CF, 0xCD3E7E6F,
0x1618B166, 0xFD2C1D05, 0x848FD2C5, 0xF6FB2299,
0xF523F357, 0xA6327623, 0x93A83531, 0x56CCCD02,
0xACF08162, 0x5A75EBB5, 0x6E163697, 0x88D273CC,
0xDE966292, 0x81B949D0, 0x4C50901B, 0x71C65614,
0xE6C6C7BD, 0x327A140A, 0x45E1D006, 0xC3F27B9A,
0xC9AA53FD, 0x62A80F00, 0xBB25BFE2, 0x35BDD2F6,
0x71126905, 0xB2040222, 0xB6CBCF7C, 0xCD769C2B,
0x53113EC0, 0x1640E3D3, 0x38ABBD60, 0x2547ADF0,
0xBA38209C, 0xF746CE76, 0x77AFA1C5, 0x20756060,
0x85CBFE4E, 0x8AE88DD8, 0x7AAAF9B0, 0x4CF9AA7E,
0x1948C25C, 0x02FB8A8C, 0x01C36AE4, 0xD6EBE1F9,
0x90D4F869, 0xA65CDEA0, 0x3F09252D, 0xC208E69F,
0xB74E6132, 0xCE77E25B, 0x578FDFE3, 0x3AC372E6]
]
# P-array
self.P = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# from https://www.schneier.com/code/bfsh-koc.zip
j = 0
for i in range(18):
data = 0x00000000
for k in range(4):
data = (data << 8) | ord(key[j])
j = (j + 1) % len(key)
self.P[i] = ORIG_P[i] ^ data
L, R = 0, 0
for i in range(0, 18, 2):
L, R = self.encrypt(L, R)
self.P[i] = L
self.P[i + 1] = R
for i in range(4):
for j in range(0, 256, 2):
L, R = self.encrypt(L, R)
self.S[i][j] = L
self.S[i][j + 1] = R
def _f(self, x):
x = _uint32(x)
h = _uint32(self.S[0][x >> 24] + self.S[1][x >> 16 & 0xff])
return _uint32(( h ^ self.S[2][x >> 8 & 0xff] ) + self.S[3][x & 0xff])
def _uint32(n):
return ctypes.c_uint32(n).value
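if __name__ == '__main__':
    # Hedged round-trip demo (illustration only): any key string works, and
    # decrypt(encrypt(L, R)) must recover the original pair.
    bf = Blowfish('TESTKEY')
    l, r = bf.encrypt(123456, 654321)
    assert bf.decrypt(l, r) == (123456, 654321)
    print('Blowfish round trip ok')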
| 19,150 | 53.561254 | 80 |
py
|
DNN_Rover
|
DNN_Rover-master/rover/Data.py
|
import time
import numpy as np
import h5py
import progressbar
import datetime
import tflearn
from tflearn.layers.core import input_data
import torchvision.models as models
from NetworkSwitch import *
import torch
import torch.nn as nn
from skimage.transform import resize
class Data():
def __init__(self, driver_name, rover_name, save_data, framework,
filename, network_name, input_shape, normalization, norm_vals,
num_out, image_type):
self.angles = []
self.images = []
self.start = time.time()
self.names = driver_name + '_' + rover_name
self.save_data = save_data
self.framework = framework
self.filename = filename
        self.network_name = network_name
self.input_shape = input_shape
self.normalization = normalization
self.norm_vals = norm_vals
self.num_out = num_out
        self.image_type = image_type
        if self.image_type in ['framestack', 'Framestack']:
            # predict() below reads these; the values mirror RoverRun's setup
            # and assume the default 30 FPS frame history
            self.framestack = np.zeros([1, 130, 320, 30])
            self.stack = [0, 5, 15]
def load_network(self):
if self.framework in ['tf', 'TF']:
if self.network_name in ['ResNet34',
'ResNet26',
'ResNeXt34',
'ResNeXt26']:
tflearn.config.init_training_mode()
self.network_name = modelswitch[self.network_name]
self.network = input_data(shape=self.input_shape)
self.network = self.network_name(self.network,
self.num_out,
drop_prob=1.0)
self.model = tflearn.DNN(self.network)
self.model.load(self.filename)
elif self.framework in ['PT', 'pt']:
self.network_name = models.__dict__[self.network_name]
self.model=self.network_name()
self.model.fc = nn.Linear(512, self.num_out)
self.model.cuda()
self.model.load_state_dict(torch.load(self.filename))
self.model.eval()
return
def predict(self, s):
if self.framework in ['tf', 'TF']:
s = s[None, 110:, ...]
if self.image_type in ['grayscale', 'framestack']:
s = np.mean(s, 3, keepdims=True)
if self.image_type in ['framestack']:
current = s
self.framestack = np.concatenate((current,
self.framestack[:, :, :, 1:]), 3)
s = self.framestack[:, :, :, self.stack]
out = self.model.predict(s)
elif self.framework in ['pt', 'PT']:
out = resize(s, (224, 224)).transpose((2, 0, 1))[None,...]
out = torch.from_numpy(out).float().cuda()
out = self.model(out).detach().cpu().numpy()[0, :]
return np.argmax(out)
def normalize(self, x):
if self.normalization is not None:
if self.normalization == 'instance_norm':
x = (x - np.mean(x)) / (np.std(x) + 1e-6)
elif self.normalization == 'channel_norm':
for j in range(x.shape[-1]):
x[..., j] -= self.norm_vals[j]
return x
def add_data(self, image, action):
self.angles.append(action)
self.images.append(image)
print('Collecting Data')
return
def save(self):
        if self.save_data:
print('Saving the Training Data you collected.')
self.images = np.array(self.images, dtype='uint8')
self.angles = np.array(self.angles, dtype='float16')
elapsedTime = int(time.time() - self.start)
dset_name = str(elapsedTime) + "seconds_" + self.names + ".h5"
h5f = h5py.File(dset_name, 'w')
h5f.create_dataset('X', data=self.images)
h5f.create_dataset('Y', data=self.angles)
h5f.close()
return
| 3,962 | 35.027273 | 80 |
py
|
DNN_Rover
|
DNN_Rover-master/rover/byteutils.py
|
import struct
import sys
def dump_bytes(bytes):
for c in bytes:
sys.stdout.write('%02x ' % ord(c))
sys.stdout.write('\n')
def bytes_to_int(bytes, offset):
return struct.unpack('i', bytes[offset:offset + 4])[0]
def bytes_to_uint(bytes, offset):
return struct.unpack('I', bytes[offset:offset + 4])[0]
def bytes_to_short(bytes, offset):
return struct.unpack('h', bytes[offset:offset + 2])[0]
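if __name__ == '__main__':
    # Hedged demo (illustration only; Python 2 str semantics, matching this
    # module): pack known values, then read them back at their byte offsets.
    payload = struct.pack('i', -42) + struct.pack('h', 7)
    print(bytes_to_int(payload, 0))    # -42
    print(bytes_to_short(payload, 4))  # 7
    dump_bytes(payload)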
| 426 | 18.409091 | 58 |
py
|
DNN_Rover
|
DNN_Rover-master/rover/__init__.py
|
import threading
import socket
import time
import numpy as np
import cv2
from rover.blowfish import Blowfish
from rover.adpcm import decodeADPCMToPCM
from rover.byteutils import *
# Base class for handling sockets, encryption, and movement
class Rover:
def __init__(self):
""" Creates a Rover object that you can communicate with.
"""
self.HOST = '192.168.1.100'
self.PORT = 80
TARGET_ID = 'AC13'
TARGET_PASSWORD = 'AC13'
self.TREAD_DELAY_SEC = 0.05
self.KEEPALIVE_PERIOD_SEC = 60
# Create command socket connection to Rover
self.commandsock = self._new_socket()
# Send login request with four arbitrary numbers
self._send_command_int_request(0, [0, 0, 0, 0])
# Get login reply
reply = self._receive_a_command_reply_from_rover(82)
# Extract Blowfish key from camera ID in reply
camera_ID = reply[25:37].decode('utf-8')
key = TARGET_ID + ':' + camera_ID + '-save-private:' + TARGET_PASSWORD
# Extract Blowfish inputs from rest of reply
l = bytes_to_int(reply, 66)
r1 = bytes_to_int(reply, 70)
l2 = bytes_to_int(reply, 74)
r2 = bytes_to_int(reply, 78)
# Make Blowfish cipher from key
bf = _RoverBlowfish(key)
# Encrypt inputs from reply
l, r1 = bf.encrypt(l, r1)
l2, r2 = bf.encrypt(l2, r2)
# Send encrypted reply to Rover
self._send_command_int_request(2, [l, r1, l2, r2])
# Ignore reply from Rover
self._receive_a_command_reply_from_rover(26)
# Start timer task for keep-alive message every 60 seconds
self._start_keep_rover_alive_task()
# Setup vertical camera controller
self.cameraVertical = _RoverCamera(self, 1)
# Send video-start request
self._send_command_int_request(4, [1])
# Get reply from Rover
reply = self._receive_a_command_reply_from_rover(29)
# Create media socket connection to Rover
self.mediasock = self._new_socket()
# Send video-start request based on last four bytes of reply
self._send_a_request(self.mediasock, 'V', 0, 4, map(ord, reply[25:]))
# Send audio-start request
self._send_command_byte_request(8, [1])
# Ignore audio-start reply
self._receive_a_command_reply_from_rover(25)
# Receive images on another thread until closed
self.is_active = True
self.reader_thread = _MediaThread(self)
self.reader_thread.start()
# Set up treads
self.leftTread = _RoverTread(self, 4)
self.rightTread = _RoverTread(self, 1)
def close(self):
""" Closes off communication with Rover.
"""
self.keep_a_live_timer.cancel()
self.is_active = False
self.commandsock.close()
if self.mediasock:
self.mediasock.close()
# Stop moving treads
self.set_wheel_treads(0, 0)
def turn_stealth_on(self):
""" Turns on stealth mode (infrared).
"""
self._send_camera_request(94)
def turn_stealth_off(self):
""" Turns off stealth mode (infrared).
"""
self._send_camera_request(95)
def move_camera_in_vertical_direction(self, where):
""" Moves the camera up or down, or stops moving it. A nonzero value for the
where parameter causes the camera to move up (+) or down (-). A
zero value stops the camera from moving.
"""
self.cameraVertical.move(where)
    def _start_keep_rover_alive_task(self):
self._send_command_byte_request(255)
self.keep_a_live_timer = \
threading.Timer(self.KEEPALIVE_PERIOD_SEC, self._start_keep_rover_alive_task, [])
self.keep_a_live_timer.start()
def _send_command_byte_request(self, request_id, bytes_request=None):
if not bytes_request:
bytes_request = []
self._send_a_command_request(request_id, len(bytes_request), bytes_request)
def _send_command_int_request(self, request_id, intervals):
byte_value = []
for val in intervals:
for c in struct.pack('I', val):
byte_value.append(ord(c))
self._send_a_command_request(request_id, 4 * len(intervals), byte_value)
def _send_a_command_request(self, id_command_request, n, contents):
self._send_a_request(self.commandsock, 'O', id_command_request, n, contents)
def _send_a_request(self, sock, c, id_request, n, contents):
bytes_request = [ord('M'), ord('O'), ord('_'), ord(c), id_request,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, n, 0, 0, 0, 0, 0, 0, 0]
bytes_request.extend(contents)
request = ''.join(map(chr, bytes_request))
sock.send(request)
def _receive_a_command_reply_from_rover(self, count):
reply = self.commandsock.recv(count)
return reply
def _new_socket(self):
sock = socket.socket()
sock.connect((self.HOST, self.PORT))
return sock
def _send_control_request_to_rover(self, a, b):
self._send_command_byte_request(250, [a, b])
# 2.0 overrides:
def _send_camera_request(self, request):
self._send_command_byte_request(14, [request])
#def __init__(self):
#Rover.__init__(self)
# Set up treads
#self.leftTread = _RoverTread(self, 4)
#self.rightTread = _RoverTread(self, 1)
def get_battery_percentage(self):
""" Returns percentage of battery remaining.
"""
self._send_command_byte_request(251)
reply = self._receive_a_command_reply_from_rover(32)
return 15 * ord(reply[23])
def set_wheel_treads(self, left, right):
""" Sets the speed of the left and right treads (wheels). + = forward;
- = backward; 0 = stop. Values should be in [-1..+1].
"""
        self.leftTread.update(left)
        self.rightTread.update(right)
def turn_the_lights_on(self):
""" Turns the headlights and taillights on.
"""
self._set_the_lights_on_or_off(8)
def turn_the_lights_off(self):
""" Turns the headlights and taillights off.
"""
self._set_the_lights_on_or_off(9)
def _set_the_lights_on_or_off(self, on_or_off):
self._send_control_request_to_rover(on_or_off, 0)
def process_video_from_rover(self, jpegbytes, timestamp_10msec):
array_of_bytes = np.fromstring(jpegbytes, np.uint8)
self.image = cv2.imdecode(array_of_bytes, flags=3)
k = cv2.waitKey(1) & 0xFF
return self.image
def process_audio_from_rover(self, pcmsamples, timestamp_10msec):
""" Processes a block of 320 PCM audio samples streamed from Rover.
Audio is sampled at 8192 Hz and quantized to +/- 2^15.
Default method is a no-op; subclass and override to do something
interesting.
"""
pass
def _spin_rover_wheels(self, wheel_direction, speed):
# 1: Right, forward
# 2: Right, backward
# 4: Left, forward
# 5: Left, backward
self._send_control_request_to_rover(wheel_direction, speed)
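# Added illustrative sketch (not part of the original driver): a minimal
# Rover subclass overriding the audio hook documented above. Defining the
# class has no side effects; instantiating it would attempt the TCP
# handshake with the rover at 192.168.1.100 performed in Rover.__init__.
class _ExampleLoudnessRover(Rover):
    def process_audio_from_rover(self, pcmsamples, timestamp_10msec):
        # pcmsamples is a block of 320 signed 16-bit samples at 8192 Hz
        samples = np.array(pcmsamples, dtype='float64')
        rms = np.sqrt(np.mean(samples ** 2))
        print 'audio RMS at %d ms: %.1f' % (timestamp_10msec * 10, rms)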
# "Private" classes ===========================================================
# A special Blowfish variant with P-arrays set to zero instead of digits of Pi
class _RoverBlowfish(Blowfish):
def __init__(self, key):
Blowfish.__init__(self, key)
ORIG_P = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
self._keygen(key, ORIG_P)
# A thread for reading streaming media from the Rover
class _MediaThread(threading.Thread):
def __init__(self, rover):
threading.Thread.__init__(self)
self.rover = rover
self.buffer_size = 1024
def run(self):
# Accumulates media bytes
media_bytes = ''
# Starts True; set to False by Rover.close()
while self.rover.is_active:
# Grab bytes from rover, halting on failure
try:
buf = self.rover.mediasock.recv(self.buffer_size)
except:
break
# Do we have a media frame start?
k = buf.find('MO_V')
# Yes
if k >= 0:
# Already have media bytes?
if len(media_bytes) > 0:
# Yes: add to media bytes up through start of new
media_bytes += buf[0:k]
# Both video and audio messages are time-stamped in 10msec units
timestamp = bytes_to_uint(media_bytes, 23)
# Video bytes: call processing routine
if ord(media_bytes[4]) == 1:
self.rover.process_video_from_rover(media_bytes[36:], timestamp)
# Audio bytes: call processing routine
else:
audio_size = bytes_to_uint(media_bytes, 36)
sample_audio_size = 40 + audio_size
offset = bytes_to_short(media_bytes, sample_audio_size)
index = ord(media_bytes[sample_audio_size + 2])
pcmsamples = decodeADPCMToPCM(media_bytes[40:sample_audio_size], offset, index)
self.rover.process_audio_from_rover(pcmsamples, timestamp)
# Start over with new bytes
media_bytes = buf[k:]
# No media bytes yet: start with new bytes
else:
media_bytes = buf[k:]
# No: accumulate media bytes
else:
media_bytes += buf
class _RoverTread(object):
def __init__(self, rover, index):
self.rover = rover
self.index = index
self.isMoving = False
self.startTime = 0
def update(self, value):
if value == 0:
if self.isMoving:
self.rover._spin_rover_wheels(self.index, 0)
self.isMoving = False
else:
if value > 0:
wheel = self.index
else:
wheel = self.index + 1
current_run_time = time.time()
if (current_run_time - self.startTime) > self.rover.TREAD_DELAY_SEC:
self.startTime = current_run_time
self.rover._spin_rover_wheels(wheel, int(round(abs(value) * 10)))
self.isMoving = True
class _RoverCamera(object):
def __init__(self, rover, stop_rover_command):
self.rover = rover
self.stop_rover_command = stop_rover_command
self.isMoving = False
def move(self, where):
if where == 0:
if self.isMoving:
self.rover._send_camera_request(self.stop_rover_command)
self.isMoving = False
elif not self.isMoving:
if where == 1:
self.rover._send_camera_request(self.stop_rover_command - 1)
else:
self.rover._send_camera_request(self.stop_rover_command + 1)
self.isMoving = True
| 11,218 | 31.518841 | 103 |
py
|
DNN_Rover
|
DNN_Rover-master/rover/Pygame_UI.py
|
import pygame
import numpy as np
import cv2
try:
    from scipy.misc import bytescale  # removed in SciPy >= 1.2
except ImportError:
    def bytescale(data):
        # Minimal fallback: linearly rescale an array into the uint8 range.
        data = data.astype(float)
        lo, hi = data.min(), data.max()
        if hi <= lo:
            return np.zeros(data.shape, dtype='uint8')
        return ((data - lo) * (255.0 / (hi - lo))).astype('uint8')
import os
import time
class Pygame_UI:
def __init__(self, fps, speed):
pygame.init()
pygame.display.set_caption('Rover Dashboard')
self.screen_size = [700, 480]
self.screen = pygame.display.set_mode(self.screen_size)
self.screen.fill((255,255,255))
self.fontSize = 30
self.font = pygame.font.SysFont(None, self.fontSize)
self.clock = pygame.time.Clock()
self.fps = fps
self.start_time = time.time()
self.color = (0,0,0)
self.action_dict = {}
self.action_dict['a'] = [-speed, speed, 0]
self.action_dict[0] = [-speed, speed]
self.action_dict['w'] = [speed, speed, 1]
self.action_dict[1] = [speed, speed]
self.action_dict['d'] = [speed, -speed, 2]
self.action_dict[2] = [speed, -speed]
self.action_dict['s'] = [-speed, -speed, 3]
self.action_dict[3] = [-speed, -speed]
self.action_dict[' '] = [0, 0, 4]
self.action_dict[4] = [0, 0]
self.action_dict['q'] = [0, 0, 9]
def display_message(self, text, color, x, y):
label = self.font.render(text, True, color)
self.screen.blit(label, (x,y))
def getActiveKey(self):
key = None
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
key = event.key
key = chr(key)
return key
def manage_UI(self):
self.clock.tick(self.fps)
os.system('clear')
return
def show_feed(self, image):
cv2.imshow("RoverCam", bytescale(image))
cv2.waitKey(1)
return
def cleanup(self):
elapsed_time = np.round(time.time() - self.start_time, 2)
print('This run lasted %.2f seconds'%(elapsed_time))
pygame.quit()
cv2.destroyAllWindows()
return
| 1,930 | 29.650794 | 65 |
py
|
StodAp
|
StodAp-master/main.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import functions
import model
import config
import cPickle as pickle
import time
#functions.LoadODPs()
#functions.LoadODPData()
#functions.WriteWikiPages()
#functions.CalculateStats()
#functions.TagsOverN(1)
#functions.TagsDistribution()
#functions.TagsPerDataset()
#functions.Similarity2('naive')
#functions.WriteTagsCSV()
#functions.GetLanguage()
#functions.MostUsedTags()
#functions.LoadGlobalTags()
#functions.GroupStats()
#functions.SignificanceOfTagsWithMeaning()
#with open(config.global_tags_file, 'rb') as input:
# g = pickle.load(input)
#for gg in g:
# print '"' + gg.label + '"'
# for ggg in gg.local_tags:
# print ">>>" + ggg.url + " " + ggg.name
#with open(config.objects_file, 'rb') as input:
# ODP = pickle.load(input)
#with open("ODP3.pkl-12-10manha", 'rb') as input:
# ODPb = pickle.load(input)
#x = 0
#for k in range(0,len(ODP)):
# a = sum(map(lambda z: len(z.meanings), ODP[k].tags))
# b = sum(map(lambda z: len(z.meanings), ODPb[k].tags))
# print str(k) + " " + ODP[k].url + " " + str(a) + " " + str(b)
#with open(config.objects_file, 'rb') as input:
# ODP = pickle.load(input)
#r = functions.find_in_tags(ODP, "education")
#for a in r:
# print a
#with open(config.objects_file, 'rb') as input:
# ODP = pickle.load(input)
#groups = []
#for o in ODP:
# print o.url
# oo = model.OpenDataPortal(o.url, o.name, None, None)
# oo.load_groups()
# groups.append(oo)
# print ">>>> " + str(len(oo.groups))
#
# with open(config.groups_file, 'wb') as output:
# pickle.dump(groups, output, -1)
#r = functions.find_in_tags(ODP,"saúde")
#print len(r)
#r = functions.find_in_tags(ODP,"Saude")
#print len(r)
#with open("ODP3.pkl.bkp2", 'rb') as input:
# ODPb = pickle.load(input)
#for k in range(0,len(ODP)):
# o = 0
# ob = 0
# for t in range(0,len(ODP[k].tags)):
# o += len(ODP[k].tags[t].meanings)
# ob += len(ODPb[k].tags[t].meanings)
# print str(k) + " " + str(ODP[k].url) + " " + str(o) + " " + str(ob)
#k = -1
#for o in ODP:
## k += 1
# print str(o.url)
# try:
# print o.lang
# except:
# if o.url == "http://portal.openbelgium.be":
# o.lang = None
# if o.url == "http://datosabiertos.ec":
# o.lang = "esp"
# if o.url == "http://udct-data.aigid.jp":
# o.lang = "jpn"
# if o.url == "http://data.wu.ac.at":
# o.lang = "deu"
# if k > 19:
# o.set_language()
# for tag in o.tags:
# if ([int(tag.name[i]) for i in range(0,len(tag.name)) if tag.name[i].encode('utf-8').isdigit()] == []) and (len(tag.name)>3):
# time.sleep(.02)
# tag.set_meaning_2(o.lang)
# with open(config.objects_file, 'wb') as output:
# pickle.dump(ODP, output, -1)
#
#import rdflib
#from rdflib import URIRef
#from rdflib import Graph
#import urllib2
#import urllib
#means = URIRef("http://lexvo.org/ontology#means")
#seeAlso = URIRef("http://www.w3.org/2000/01/rdf-schema#seeAlso")
#abstract = URIRef("http://dbpedia.org/ontology/abstract")
#with open(config.global_tags_file, 'rb') as input:
# global_tags = pickle.load(input)
#for tag in global_tags:
# g = Graph()
# parse = True
# try:
# #print "http://www.lexvo.org/data/term/" + lang + "/" + urllib.quote(self.name.encode('utf-8'))
# g.parse("http://dbpedia.org/data/" + urllib.quote(tag.label.capitalize().encode('utf-8')))
# except:
# parse = False
#
# print urllib.quote(tag.label.capitalize().encode('utf-8'))
# if parse:
# #out = self.name.encode('utf-8')
#
# for s,p,o in g.triples((None,abstract,None)):
# if o.language == "en":
# #print o
# tag.description = o
#with open(config.global_tags_file, 'wb') as output:
# pickle.dump(global_tags, output, -1)
functions.WriteWikiPages()
| 3,688 | 23.430464 | 129 |
py
|
StodAp
|
StodAp-master/functions.py
|
##########################################################
#This script walks through several CKAN instances given by the CKAN instances project (https://github.com/ckan/ckan-instances/blob/gh-pages/config/instances.json) and collects information about the portals, the datasets, and tags. Data are stored as objects of the class Open Data Portal.
#This script outputs a file that is suitable to be inserted in a (semantic) media wiki instance.
##########################################################
import config
import model
import urllib2
import urllib
import json
import pprint
import cPickle as pickle
import numpy
import lib
from unidecode import unidecode
import Levenshtein
def LoadODPs():
"Reads the instance files, and initialize a list of ODP objects"
ODP = []
with open(config.instances_file, 'r') as f:
instances = json.loads(f.read())
print 'Number of instances: ' + str(len(instances))
for i in instances:
if 'url-api' in i:
url = i['url-api']
else:
url = i['url']
try:
response = lib.urlopen_with_retry(url + '/api/3/action/tag_list')
response_pkg = lib.urlopen_with_retry(url + '/api/3/action/package_list')
except:
#print "Could not connect"
response = 0
if response:
try:
response_dict = json.loads(response.read())
result = response_dict['result']
response_dict_pkg = json.loads(response_pkg.read())
packages = response_dict_pkg['result']
ODP.append(model.OpenDataPortal(url, i['title'], len(result), len(packages)))
#print i['title'] + ';' + i['url'] + ';' + str(len(result)) + ';' + str(len(packages))
except:
print i['title'] + ';' + url + ';' + 'No API 1'
else:
print i['title'] + ';' + url + ';' + 'No API 2'
with open(config.objects_file, 'wb') as output:
pickle.dump(ODP, output, -1)
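# Illustrative shape of one entry in the CKAN instances file read above
# (key names taken from the code; the values are made up):
# {"title": "Example Portal",
#  "url": "http://data.example.org",
#  "url-api": "http://data.example.org"}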
def LoadODPData():
"loop through all portals in ODP and load data - tags, dataset, tagging"
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
for o in ODP:
if len(o.tags) == 0:
print "process" + o.url
o.load_data()
with open(config.objects_file, 'wb') as output:
pickle.dump(ODP, output, -1)
else:
print o.url + "already processed"
def CalculateStats():
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
print 'Number of portals: ' + str(len(ODP))
x = 0; y = 0; z = 0; ld = 0;
tags_per_ds = []
tags_with_meaning = []
tags = []
datasets = []
for o in ODP:
if o.num_of_tags == len(o.tags):
x = x + o.num_of_tags
y = y + o.num_of_packages
z = z + len(o.tagging)
ld = ld + len(o.datasets)
tags_per_ds.append(o.tags_per_dataset_mean())
tags_with_meaning.append(o.tags_with_meaning())
tags.append(o.num_of_tags)
datasets.append(o.num_of_packages)
# else:
# print "Diff: " + o.url + ": " + str(o.num_of_tags) + " - " + str(len(o.tags))
tags = numpy.array(tags);
datasets = numpy.array(datasets);
print 'Number of tags: ' , str(x)
print 'Average tag number: ' + str(tags.mean()) + '+/-' + str(tags.std())
print 'Number of datasets: ' , str(y)
print 'Average dataset number: ' + str(datasets.mean()) + '+/-' + str(datasets.std())
all_tags, unique_tags = CalculateUniqueTags()
print 'Number of loaded taggings: ' , str(z)
print 'Number of loaded tags: ' , str(len(all_tags))
print 'Number of loaded datasets: ' , str(ld)
print 'Number of loaded unique tags: ' , str(len(unique_tags))
tags_per_ds = numpy.array(tags_per_ds);
tags_with_meaning = numpy.array(tags_with_meaning);
print "------"
print("Tags per dataset (av.): %.2f" % tags_per_ds.mean())
print("Tags per dataset (max): %.2f" % tags_per_ds.max())
print("Tags per dataset (min): %.2f" % tags_per_ds.min())
print "------"
print("Tags with meaning (av.): %.2f" % tags_with_meaning.mean())
print("Tags with meaning (max): %.2f" % tags_with_meaning.max())
print("Tags with meaning (min): %.2f" % tags_with_meaning.min())
tg = 0
N = 0
no_groups =0
ds_group = []
for o in ODP:
if len(o.groups) > 0:
tg += len(o.groups)
N += 1
for g in o.groups:
if g.n_datasets > 0:
ds_group.append(g.n_datasets)
else:
no_groups += 1
ds_group = numpy.array(ds_group);
print "------"
print 'Number of groups: ' , str(tg)
print 'ODP without groups: ' , str(no_groups)
print 'Groups / ODP: ' , str(tg/float(N))
print 'Datasets / Group: ' , ds_group.mean()
def CalculateUniqueTags():
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
all_tags = []
unique_tags = []
for o in ODP:
for t in o.tags:
all_tags.append(str(t.name.encode('utf-8')))
srtd = sorted(all_tags,key=str.lower)
unique_tags.append(srtd[0].lower().strip())
for t in srtd:
if t.lower().strip() != unique_tags[len(unique_tags)-1]:
unique_tags.append(t.lower().strip())
return all_tags, unique_tags
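# Worked example of the dedup rule above (hypothetical tags): the list
# ['Health', 'health ', 'HEALTH', 'Water'] sorts case-insensitively and,
# after lower().strip(), collapses to the unique tags ['health', 'water'].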
def TagsOverN(N):
mfile = open('percentage_over_' + str(N) + '.m', 'w')
mfile.write ('tags_over_n = [' + '\n')
mfile_m = open('percentage_over_' + str(N) + '_merged.m', 'w')
mfile_m.write ('tags_over_n_merged = [' + '\n')
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
tags_over_n_perc = []
for o in range(0,len(ODP)):
tags_over_n = 0
for t in ODP[o].tags:
if int(t.count) > N:
tags_over_n += 1
if len(ODP[o].tags) != 0:
res = float(tags_over_n)/float(len(ODP[o].tags))
else:
res = 0
av_reuse = sum(map(lambda z: z.count, ODP[o].tags))/float(len(ODP[o].tags))
tags_over_n_perc.append(res)
mfile.write (str(tags_over_n) + ' ' + str(res) + " " + str(av_reuse) +'\n')
# merge similar tags
alltags = []
odp = ODP[o]
for t in odp.tags:
alltags.append(model.AllTags(t.name,odp.url,t.count,odp.lang))
alltags = sorted(alltags,key=lambda x: x.name)
k = 0
print odp.url
while k < len(alltags)-1:
if (unidecode(alltags[k].name.lower()) == unidecode(alltags[k+1].name.lower())):
alltags[k].count += alltags[k+1].count
alltags.remove(alltags[k+1])
k -= 1
k += 1
#print str(k) + " " + str(len(list))
tags_over_n = 0
for t in alltags:
if int(t.count) > N:
tags_over_n += 1
if len(alltags) != 0:
res2 = float(tags_over_n)/float(len(alltags))
else:
res2 = 0
av_reuse_m = sum(map(lambda z: z.count, alltags))/float(len(alltags))
print av_reuse_m
mfile_m.write (str(tags_over_n) + ' ' + str(res2) + " " + str(av_reuse_m) + '\n')
mfile.write ('];')
mfile.close()
mfile_m.write ('];')
mfile_m.close()
tags_over_n_perc = sorted (tags_over_n_perc)
return tags_over_n_perc
def WriteODPCSV():
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
csv_file = open(config.objects_file + '.csv', 'w')
csv_file.write("Name ; URL ; Number of Tags ; Very similar tags ; Number of Packages; Tags per dataset (mean) ; Tags with meaning\n")
for k in range(0,len(ODP)):
o = ODP[k]
sim = Similarity_ODP(k)
csv_file.write(o.name.encode('utf-8') + ";"+ o.url.encode('utf-8') + ";" + str(o.num_of_tags).encode('utf-8') + ";" + str(sim) + ";"+ str(o.num_of_packages).encode('utf-8') + ";" + str(o.tags_per_dataset_mean()).encode('utf-8') + ";" + str(o.tags_with_meaning()).encode('utf-8') + "\n")
csv_file.close()
def WriteTagsCSV():
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
csv_file = open(config.objects_file + '.tags.csv', 'w')
csv_file.write("URL ; Tag ; Count ; Meanings\n")
for k in range(0,len(ODP)):
o = ODP[k]
for t in o.tags:
csv_file.write(o.url.encode('utf-8') + ";" + t.name.encode('utf-8') + ";" + str(t.count))
for m in t.meanings:
csv_file.write(";" + m)
csv_file.write("\n")
csv_file.close()
def MostUsedTags():
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
csv_file = open(config.objects_file + '.most_used_tags.csv', 'w')
csv_file.write("Tags ; URLs ; Count (times) ; Count (ODPs) \n")
alltags = []
for o in ODP:
for t in o.tags:
alltags.append(model.AllTags(t.name,o.url,t.count,o.lang))
alltags = sorted(alltags,key=lambda x: x.name)
all_unique = [alltags[0]]
s = 0
for k in range(0,len(alltags)-1):
if (unidecode(alltags[k].name.lower()) != unidecode(alltags[k+1].name.lower())):
all_unique.append(alltags[k+1])
s += 1
else:
if alltags[k].url != alltags[k+1].url:
all_unique[s].global_count += 1
all_unique[s].url.append(alltags[k+1].url)
if alltags[k].lang != alltags[k+1].lang:
all_unique[s].lang += ";" + alltags[k+1].lang
all_unique[s].count += alltags[k+1].count
all_unique = sorted(all_unique,key=lambda x: x.global_count, reverse = True)
for t in all_unique:
#url = ' '.join(t.url).encode('utf-8')
#csv_file.write(t.name.encode('utf-8') + ";" + str(url) + ";" + str(t.count) + ";" + str(t.global_count) + "\n")
csv_file.write(t.name.encode('utf-8') + ";" + ";" + str(t.count) + ";" + str(t.global_count) + "\n")
csv_file.close()
return alltags, all_unique
def TagsDistribution():
mfile = open('tags_distibution.m', 'w')
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
k = 0;
for o in ODP:
if len(o.tags) > 0:
k += 1
mfile.write('tags_distibution{' + str(k) + '} = [\n')
for t in o.tags:
mfile.write(str(t.count) + '\n')
mfile.write('];\n')
mfile.close()
def TagsPerDataset():
mfile = open('tags_perdataset.m', 'w')
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
k = 0;
for o in ODP:
if len(o.datasets) > 0:
k += 1
mfile.write('tags_per_dataset{' + str(k) + '} = [\n')
for d in o.datasets:
mfile.write(str(d.number_of_tags) + '\n')
mfile.write('];\n')
mfile.close()
def Similarity():
mfile = open('similarity.m', 'w')
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
k = 0
for o in ODP:
m = o.similarity_matrix()
        return  # NOTE: this early return leaves the code below unreachable (only the first portal is processed)
k +=1
s = 0
mfile.write('similarity{' + str(k) + '} = [\n')
for i in range(0,len(o.tags)):
for j in range(0,len(o.tags)):
if m[i][j] == 1:
s += 1
mfile.write(str(s) + '] \n')
# for i in range(0,len(o.tags)):
# for j in range(0,len(o.tags)):
# mfile.write(str(m[i][j]) + ' ')
# mfile.write('\n')
# mfile.write('];\n')
mfile.close()
def Similarity2(method = 'naive'):
mfile = open('similarity_' + method + '.m', 'w')
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
mfile.write('similarity = [\n')
for o in ODP:
s = 0
srtd = sorted(map(lambda z: z.name.encode('utf-8'), o.tags),key=str.lower)
for i in range(1,len(o.tags)):
if method == 'naive':
if unidecode(srtd[i].lower()) == unidecode(srtd[i-1].lower()):
#print o.tags[i].name.encode('utf-8') + " " + o.tags[j].name.encode('utf-8')
s +=1
else:
if Levenshtein.distance(unidecode(srtd[i].lower()),unidecode(srtd[i-1].lower())) < 3:
s +=1
mfile.write(o.url + " " + str(s) + ' ' + str(float(s)/len(o.tags)) + " " + str(len(o.tags)) + '\n')
print o.name
mfile.write('];\n')
# for i in range(0,len(o.tags)):
# for j in range(0,len(o.tags)):
# mfile.write(str(m[i][j]) + ' ')
# mfile.write('\n')
# mfile.write('];\n')
mfile.close()
def Similarity_ODP(odp):
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
s = 0
o = ODP[odp]
srtd = sorted(map(lambda z: z.name.encode('utf-8'), o.tags),key=str.lower)
for i in range(1,len(o.tags)):
if unidecode(srtd[i].lower()) == unidecode(srtd[i-1].lower()):
#print o.tags[i].name.encode('utf-8') + " " + o.tags[j].name.encode('utf-8')
s +=1
return s
def LoadGlobalTags():
'''
This function creates an array of AllTags. Each element is the name of a tag, and stores the urls where it is used, including translated versions.
    This array is then used to generate a wiki page.
'''
print "#step 1: get most used tags"
all_tags, most_used = MostUsedTags()
print "#step 2: start the Global Tags Dataset"
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
global_tags = []
for i in range(0,200):
G = model.GlobalTag(most_used[i].name)
local_tags = find_in_tags(ODP,most_used[i].name)
for l in local_tags:
G.local_tags.append(l)
global_tags.append(G)
print "#step 3: find the tags meanings"
import rdflib
from rdflib import URIRef
from rdflib import Graph
means = URIRef("http://lexvo.org/ontology#means")
seeAlso = URIRef("http://www.w3.org/2000/01/rdf-schema#seeAlso")
translation = URIRef("http://lexvo.org/ontology#translation")
literal_form = URIRef("http://www.w3.org/2008/05/skos-xl#literalForm")
for global_tag in global_tags:
g = Graph()
parse = True
try:
g.parse("http://www.lexvo.org/data/term/" + global_tag.lang + "/" + urllib.quote(global_tag.label.encode('utf-8').lower()))
except:
parse = False
if parse:
for s,p,o in g.triples((None,means,None)):
global_tag.resources.append(str(o))
for s,p,o in g.triples((None,seeAlso,None)):
global_tag.resources.append(str(o))
print "#step 4: find the tags in other idioms"
for global_tag in global_tags:
g = Graph()
parse = True
try:
g.parse("http://www.lexvo.org/data/term/" + global_tag.lang + "/" + urllib.quote(global_tag.label.encode('utf-8').lower()))
except:
parse = False
if parse:
for s,p,o in g.triples((None,translation,None)):
#TODO UGLY - Dont do this!!!
translated = str(o).split("/")[len(str(o).split("/"))-1]
translated = urllib.unquote(translated).decode('utf8')
print global_tag.label + " === " + translated
# raw_input("Press Enter to continue...")
tags = find_in_tags(ODP,translated.encode('utf8'))
for t in tags:
global_tag.local_tags.append(t)
with open(config.global_tags_file, 'wb') as output:
pickle.dump(global_tags, output, -1)
# print "----------"
# for global_tag in global_tags:
# print global_tag.label
# for l in global_tag.local_tags:
# print l
# for r in global_tag.resources:
# print r
# print "----------"
return global_tags
def find_in_tags(ODP, name):
result = []
for o in ODP:
for t in o.tags:
if t.name.lower() == name.lower():
result.append(model.LocalTag(t.name,o.url, t.count, o.lang))
return result
def WriteWikiPages():
with open(config.global_tags_file, 'rb') as input:
global_tags = pickle.load(input)
pages_ODP = open(config.wiki_out_file, 'wb')
for g in global_tags:
pages_ODP.write(g.label + '\n\n')
pages_ODP.write('--ENDTITLE--\n')
pages_ODP.write('{{Global Tag\n')
if g.description:
pages_ODP.write('|1=' + g.description.encode('utf-8') + '\n')
pages_ODP.write('|2=' + str(g.resources_print()) + '\n')
pages_ODP.write('|3=' + g.local_tags_print().encode('utf-8') + '\n')
pages_ODP.write('|4=' + g.related_print() + '\n')
pages_ODP.write('}}' + '\n')
pages_ODP.write('--ENDPAGE--\n\n')
def SignificanceOfTagsWithMeaning():
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
N = 0; n = 0; S = 0; s = 0; x = 0 ; X = 0; z = 0 ; Z = 0
for o in ODP:
for tag in o.tags:
N += tag.count
n += 1
if ([int(tag.name[i]) for i in range(0,len(tag.name)) if tag.name[i].encode('utf-8').isdigit()] == []) and (len(tag.name)>3):
if tag.meanings != []:
S +=tag.count
s += 1
else:
Z +=tag.count
z += 1
else:
x +=1
X += tag.count
print "With meaning (perc): " + str(s/float(n)*100)
print "Not analysed (perc): " + str(x/float(n)*100)
print "No meaning (perc): " + str(z/float(n)*100)
print "With meaning (sig): " + str(S/float(N)*100)
print "Not analysed (sig): " + str(X/float(N)*100)
print "No meaning (sig): " + str(Z/float(N)*100)
def ListCooccurences():
with open(config.objects_file, 'rb') as input:
ODP = pickle.load(input)
for o in ODP:
for t in o.tags:
print "Tag: " + t.name
for c in t.cooccurences:
for tt in o.tags:
if c == tt.tag_id:
print ">> " + tt.name
break
| 16,020 | 26.154237 | 289 |
py
|
StodAp
|
StodAp-master/model.py
|
##########################################################
#This script walks through several CKAN instances given by the CKAN instances project (https://github.com/ckan/ckan-instances/blob/gh-pages/config/instances.json) and collects information about the portals, the datasets, and tags. Data are stored as objects of the class Open Data Portal.
#This script outputs a file that is suitable to be inserted in a (semantic) media wiki instance.
##########################################################
import urllib2
import urllib
import json
import pprint
import cPickle as pickle
import Levenshtein
import lib
import config
class OpenDataPortal:
def __init__(self, url, name, num_of_tags, num_of_packages):
self.url = url
self.name = name
self.num_of_tags = num_of_tags
self.num_of_packages = num_of_packages
#self.matching_tags = []
self.tags = []
self.datasets = []
self.tagging = []
self.groups = []
def __repr__(self):
return repr(self.url)
def add_tag(self, tag):
self.tags.append(Tag(tag))
def set_tag_count(self):
for tag in self.tags:
taggging_tag = [tagging.tag_id for tagging in self.tagging if tagging.tag_id == tag.tag_id]
tag.set_count(len(taggging_tag))
def add_dataset(self, dataset):
self.datasets.append(Dataset(dataset))
def add_tagging(self, tag, dataset):
self.tagging.append(Tagging(tag, dataset))
def tags_per_dataset_mean (self):
if len(self.datasets) > 0:
ret = float(reduce (lambda x,y: x + y, map(lambda z: z.number_of_tags, self.datasets))) / len (self.datasets)
else:
ret = 0
return ret
def tags_with_meaning (self):
res = 0
for t in self.tags:
if hasattr(t, 'meanings'):
if t.meanings != []:
res += 1
else:
print "no meaning"
if len(self.tags) > 0:
ret = res/float(len(self.tags))
else:
ret = 0
return ret
def similarity_matrix (self):
T = len(self.tags)
matrix = [[0 for x in range(T)] for x in range(T)]
for t in range(0,T-1):
for s in range(t,T-1):
if s != t:
matrix[s][t] = Levenshtein.distance(self.tags[t].name,self.tags[s].name)
return matrix
def load_data(self):
"get all tags from a CKAN website and count the occurences"
        tag_list = []
        tag_list_response = False
        if config.DEBUG: print "start collect tags"
        #get tags
        try:
            tag_list_response = lib.urlopen_with_retry(self.url + '/api/3/action/tag_list?all_fields=True')
        except:
            pass
        if tag_list_response:
            try:
                tag_list_dict = json.loads(tag_list_response.read())
                tag_list = tag_list_dict['result']
            except:
                pass
for tag in tag_list:
if config.DEBUG: print tag
self.add_tag(tag)
        #get datasets
        dataset_list = []
        dataset_list_response = False
        try:
            dataset_list_response = lib.urlopen_with_retry(self.url + '/api/3/action/package_list')
        except:
            pass
        if config.DEBUG: print "start collect datasets"
        if dataset_list_response:
            try:
                dataset_list_dict = json.loads(dataset_list_response.read())
                dataset_list = dataset_list_dict['result']
            except:
                pass
for dataset in dataset_list:
dataset_response = 0
try:
dataset_response = lib.urlopen_with_retry(self.url + '/api/3/action/package_search?fq=name:"' + urllib2.quote(dataset.encode('UTF-8')) + '"')
except:
                pass
if dataset_response:
try:
dataset_dict = json.loads(dataset_response.read())
dataset_allfields = dataset_dict['result']['results'][0]
self.add_dataset(dataset_allfields)
for tag in dataset_allfields['tags']:
self.add_tagging(tag, dataset_allfields)
except:
                    pass
if config.DEBUG: print "final tasks"
#set tag count
self.set_tag_count()
self.set_language()
self.load_groups()
for tag in self.tags:
tag.set_cooccurences(self)
def set_language(self):
import pycountry
try:
response = lib.urlopen_with_retry(self.url + '/api/3/action/status_show')
except:
response = 0
if response:
response_dict = json.loads(response.read())
code_1 = response_dict['result']['locale_default']
if code_1:
lang = str(code_1[0]) + str(code_1[1])
code_3 = pycountry.languages.get(iso639_1_code=lang).iso639_3_code
else:
code_3 = 'eng'
self.lang = code_3
#print code_1 + "; " + code_3
return code_3
#ODP.append(model.OpenDataPortal(url, i['title'], len(result), len(packages)))
def load_groups(self):
"get all groups from a CKAN website and count the datasets in it"
group_list_response = False;
try:
group_list_response = lib.urlopen_with_retry(self.url + '/api/3/action/group_list?all_fields=True')
except:
#1 == 1
print "Failed: " + self.url
        if group_list_response:
            group_list = []
            try:
                group_list_dict = json.loads(group_list_response.read())
                group_list = group_list_dict['result']
except:
#1 == 1
print "Failed 2: " + self.url
for group in group_list:
#difference in the apis
try:
package_count = group['packages'];
except:
try:
package_count = group['package_count'];
except:
package_count = 0
g = Group(group['name'],package_count)
self.groups.append(g)
class Group:
def __init__(self, name, n_datasets):
self.name = name
self.n_datasets = n_datasets
def __repr__(self):
return repr(self.name)
class Dataset:
def __init__(self, dataset):
self.name = dataset['title']
self.dataset_id = dataset['id']
self.number_of_tags = len(dataset['tags'])
def __repr__(self):
return repr(self.name)
class Tag:
def __init__(self, tag):
self.name = tag['name']
self.tag_id = tag['id']
self.set_meaning()
self.cooccurences = []
def __repr__(self):
return repr(self.name)
def set_count(self, count):
self.count = count
def set_meaning(self):
try:
self.meanings = []
req = urllib2.Request('http://spotlight.dbpedia.org/rest/annotate?text=' + urllib.quote(self.name.encode('utf-8')), headers = {'Accept' : 'application/json'})
contents = json.loads(lib.urlopen_with_retry(req).read())
if len(contents) == 7:
# if isinstance(contents['annotation']['surfaceForm'], list):
for m in contents['Resources']:
self.meanings.append(m['@URI'])
#else:
# print "here"
# self.meanings.append('http://dbpedia.org/page/' + contents['annotation']['surfaceForm']['resource']['@uri'].encode('utf-8'))
except:
            pass
def set_meaning_2(self,lang):
import rdflib
from rdflib import URIRef
from rdflib import Graph
means = URIRef("http://lexvo.org/ontology#means")
seeAlso = URIRef("http://www.w3.org/2000/01/rdf-schema#seeAlso")
g = Graph()
parse = True
try:
#print "http://www.lexvo.org/data/term/" + lang + "/" + urllib.quote(self.name.encode('utf-8'))
g.parse("http://www.lexvo.org/data/term/" + lang + "/" + urllib.quote(self.name.encode('utf-8')))
except:
parse = False
self.meanings = []
if parse:
#out = self.name.encode('utf-8')
if (None, seeAlso, None) in g:
#print "See Also found!"
for s,p,o in g.triples((None,seeAlso,None)):
#print o
#out = out + ";" + o.encode('utf-8')
self.meanings.append(o.encode('utf-8'))
if (None, means, None) in g:
#print "Meaning found!"
for s,p,o in g.triples((None,means,None)):
#print o
#out = out + ";" + o.encode('utf-8')
self.meanings.append(o.encode('utf-8'))
#print out
#print self.meanings
def set_cooccurences(self,ODP):
self.cooccurences = []
datasets = []
for tg in ODP.tagging:
if tg.tag_id == self.tag_id:
datasets.append(tg)
for dt in datasets:
for tg in ODP.tagging:
if (dt.dataset_id == tg.dataset_id) and (self.tag_id != tg.tag_id):
self.cooccurences.append(tg.tag_id)
class Tagging:
def __init__(self, tag, dataset):
self.tag_id = tag['id']
self.dataset_id = dataset['id']
class AllTags:
def __init__(self,name,url,count, lang):
self.name = name
self.url = [url]
self.count = count
self.global_count = 1
self.lang = lang
class GlobalTag:
def __init__(self,label):
self.label = label
self.description = []
self.resources = []
self.local_tags = []
self.lang = "eng" #the global tag shall always be in english
self.related = None
def resources_print(self):
out = ""
for r in self.resources:
out += str(r) + ","
return out
def related_print(self):
out = ""
for r in self.related:
out += str(r.label) + ","
return out
def local_tags_print(self):
out = ""
self.local_tags = list(set(self.local_tags))
for r in self.local_tags:
tag_url = r.url + "/dataset?tags=" + r.name
odp_url = r.url.replace("http://","").replace("www.","").rstrip("/")
out += "{{Display Tagged Resource |1=" + tag_url + " |2=" + odp_url + " |3=" + r.name + "}},"
return out
def set_related(self,global_tags):
from nltk.corpus import wordnet as wn
self.related = []
n=wn.synsets(self.label)
if n == []:
return
for x in range(0,len(global_tags)):
g=wn.synsets(global_tags[x].label)
if (g != []) and (global_tags[x].label != self.label):
#a = max(g[i].path_similarity(n[0]) for i in range(len(g)))
b = max(g[i].wup_similarity(n[0]) for i in range(len(g)))
if b >= .8:
self.related.append(global_tags[x])
return
class LocalTag:
def __init__(self,name,url,count, lang):
self.name = name
self.url = url
self.count = count
self.lang = lang
def __repr__(self):
return self.name + "-" + self.url + "-" + str(self.count) + "-" + self.lang
def __eq__(self, other):
return (self.url == other.url) and (self.name == other.name)
def __hash__(self):
return hash(('url', self.url,'name',self.name))
| 9,593 | 25.576177 | 289 |
py
|
StodAp
|
StodAp-master/lib.py
|
import time
from functools import wraps
def retry(ExceptionToCheck, tries=2, delay=2, backoff=1, logger=None):
"""Retry calling the decorated function using an exponential backoff.
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
:param ExceptionToCheck: the exception to check. may be a tuple of
exceptions to check
:type ExceptionToCheck: Exception or tuple
:param tries: number of times to try (not retry) before giving up
:type tries: int
:param delay: initial delay between retries in seconds
:type delay: int
:param backoff: backoff multiplier e.g. value of 2 will double the delay
each retry
:type backoff: int
:param logger: logger to use. If None, print
:type logger: logging.Logger instance
"""
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck, e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
if logger:
logger.warning(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
return f(*args, **kwargs)
return f_retry # true decorator
return deco_retry
import urllib2
import urllib
@retry(urllib2.URLError, tries=4, delay=2, backoff=1)
def urlopen_with_retry(url):
return urllib2.urlopen(url)
@retry(urllib2.URLError, tries=2, delay=2, backoff=1)
def Request_with_retry(url):
return urllib2.Request(url)
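# Worked example of the backoff schedule (hypothetical decorator values):
# a function wrapped with @retry(IOError, tries=4, delay=2, backoff=2)
# sleeps 2s, 4s and 8s between its four attempts before the final call is
# allowed to raise; with backoff=1, as in urlopen_with_retry above, every
# gap stays at the initial delay.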
| 1,825 | 28.934426 | 77 |
py
|
StodAp
|
StodAp-master/config.py
|
##########################################################
#This script walks through several CKAN instances given by the CKAN instances project (https://github.com/ckan/ckan-instances/blob/gh-pages/config/instances.json) and collects information about the portals, the datasets, and tags. Data are stored as objects of the class Open Data Portal.
#This script outputs a file that is suitable to be inserted in a (semantic) media wiki instance.
##########################################################
global DEBUG
DEBUG = True
global objects_file
objects_file = 'ODP.pkl'
global global_tags_file
global_tags_file = 'GlobalTags.pkl'
global wiki_out_file
wiki_out_file = 'wiki_portal.txt'
global instances_file
#instances_file = 'instances.json'
#instances_file = 'instances_debug.json'
instances_file = 'instances_test.json'
global groups_file
groups_file = 'groups.pkl'
| 881 | 32.923077 | 289 |
py
|
StodAp
|
StodAp-master/main_data_collection.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import functions
functions.LoadODPs()
functions.LoadODPData()
functions.ListCooccurences()
| 135 | 14.111111 | 28 |
py
|
StodAp
|
StodAp-master/main_data_analysis.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import functions
import config
import cPickle as pickle
functions.CalculateStats()
#functions.TagsOverN(1)
#functions.TagsDistribution()
#functions.TagsPerDataset()
#functions.Similarity2('naive')
#functions.WriteTagsCSV()
#functions.GetLanguage()
#functions.MostUsedTags()
#functions.LoadGlobalTags()
#functions.SignificanceOfTagsWithMeaning()
#functions.WriteWikiPages()
| 418 | 19.95 | 42 |
py
|
Rep-Learning
|
Rep-Learning-main/nips_supp/Meta_learning.py
|
from scipy.io import loadmat
import numpy as np
import scipy
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import numpy.linalg as npl
class optimal_representation_matrix:
def __init__(self, beta, p, n, sigma_square, epsilon, beta_star):
self.beta = beta # beta vector in the non-asymptotic optimal representation part
self.p = p # p is dimension of the features
self.n = n # Number of training samples
self.sigma_square = sigma_square # Noise variance
self.epsilon = epsilon # tiny parameter to prevent divergence of the optimization algorithm (actually not necessary but provides early stopping until some tolerance in the optimization algo.)
self.expected_error = 0
self.risk_test = 0
self.risk_test_no_shaping = 0
self.beta_star = beta_star
def objective_func(self, zeta): # Objective func to minimize, zeta refers to theta in the paper
return self.sigma_square + self.p * ((self.n / self.p) * (
np.linalg.norm(np.multiply(self.beta, 1 - zeta)) ** 2) + self.sigma_square / self.p * np.linalg.norm(
zeta) ** 2) / (self.n - np.linalg.norm(zeta) ** 2)
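    # In the paper's notation (theta = zeta), the objective above equals
    #   sigma^2 + [ n * ||beta * (1 - theta)||^2 + sigma^2 * ||theta||^2 ] / ( n - ||theta||^2 )
    # with the product beta * (1 - theta) taken elementwise; it is minimized
    # over theta in [0, 1)^p subject to sum_i theta_i = n (the constraint and
    # bounds passed to `minimize` in `lambdas` below).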
'''
Compute ksi by binary search
'''
def func_ksi(self, B, ksi):
ksi_B = ksi * B
ksi_B_frac = ksi_B / (1 + ksi_B)
return np.sum(ksi_B_frac)
def ksi_solver(self, n, p, B, lower_bd, upper_bd, ksi_iters):
left = lower_bd
right = upper_bd
while self.func_ksi(B, right) < n:
left = right
right *= 2
T = ksi_iters
for i in range(T):
mid = (left + right) / 2
n_mid = self.func_ksi(B, mid)
if n_mid > n:
right = mid
else:
left = mid
return mid
'''
Above: Compute ksi by binary search
'''
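    # The solver above finds ksi > 0 satisfying func_ksi(B, ksi) = n, i.e.
    #   sum_i ksi * B_i / (1 + ksi * B_i) = n.
    # The left-hand side is increasing in ksi, so the upper bound is doubled
    # until it brackets n and then plain bisection runs for ksi_iters steps.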
def lambdas(self):
zeta = np.random.uniform(0, 1, size=(self.p,))
cons = ({'type': 'eq',
'fun': lambda x: np.sum(x) - self.n})
res = minimize(self.objective_func, zeta, constraints=cons,
bounds=[(0, 1 - self.epsilon) for _ in range(self.p)],
# Optimization algorithm over theta in the paper (we call it zeta here)
method='trust-constr', options={'disp': True, 'maxiter': 3000})
zeta_optimal = res.x
# self.expected_error = self.objective_func(res.x)
self.expected_error = self.sigma_square + self.p * ((self.n / self.p) * (
np.linalg.norm(np.multiply(self.beta_star, 1 - zeta_optimal)) ** 2) + self.sigma_square / self.p * np.linalg.norm(zeta_optimal) ** 2) / (
self.n - np.linalg.norm(zeta_optimal) ** 2)
lambda_vector = np.zeros(shape=(self.p,))
for i in range(self.p):
ksi = 1
            lambda_vector[i] = res.x[i] / (ksi * (1 - res.x[i]))
lambda_sqrt_vector = np.sqrt(lambda_vector)
lambda_matrix = np.diag(lambda_sqrt_vector)
'''
Check risk
'''
ksi = self.ksi_solver(self.n, self.p, lambda_vector, 0, 10, 100)
        gamma = (self.sigma_square / self.p + np.mean(self.beta ** 2 * lambda_vector / (1 + ksi * lambda_vector) ** 2)) \
                / (np.mean(1 / (1 + 1 / (ksi * lambda_vector)) ** 2) * self.p / self.n)
        risk_test_1 = np.mean(lambda_vector * self.beta ** 2 / (1 + ksi * lambda_vector) ** 2)
        risk_test_2 = self.p / self.n * gamma * np.mean((1 + 1 / (lambda_vector)) ** (-2))
        risk_test = risk_test_1 + risk_test_2
        risk_test *= self.p
        risk_test += self.sigma_square
        self.risk_test = risk_test
## No-Shaping Theoretical
lambda_vector = np.ones(shape=(self.p,))
ksi = self.ksi_solver(self.n, self.p, lambda_vector, 0, 10, 100)
        gamma = (self.sigma_square / self.p + np.mean(self.beta ** 2 * lambda_vector / (1 + ksi * lambda_vector) ** 2)) \
                / (np.mean(1 / (1 + 1 / (ksi * lambda_vector)) ** 2) * self.p / self.n)
        risk_test_1 = np.mean(lambda_vector * self.beta ** 2 / (1 + ksi * lambda_vector) ** 2)
        risk_test_2 = self.p / self.n * gamma * np.mean((1 + 1 / (lambda_vector)) ** (-2))
        risk_test = risk_test_1 + risk_test_2
        risk_test *= self.p
        risk_test += self.sigma_square
        self.risk_test_no_shaping = risk_test
## End of No-Shaping Theoretical
return lambda_matrix # Returns the optimal shaping matrix
p,n,nb_iteration,ksi,sigma_square,epsilon,n_test = 100,40,3,1,0.01,0.0000001,2000 # p = feature dimension, n = number of training samples for the new task, ksi = 1 (it can be chosen arbitrarily), sigma_square = noise variance,
# epsilon = tolerance in the optimization, n_test = number of test samples for the new task
Constant,s = 25, int(2*p/10) # Constant=big eigenvalues; s = effective rank
iota = 0.2
B = np.diag(np.concatenate((1 * np.ones(shape=(s,)), # Covariance of task vectors
iota * np.ones(shape=(p-s,))), axis=0))
beta_star = np.sqrt(B).dot(np.random.normal(loc=0, scale=1,size=(p,))) # New task vector
n_truth = n
X = np.random.normal(loc=0, scale=1, size=(n,p))
y = X.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n,))
X_test = np.random.normal(loc=0, scale=1, size=(n_test,p))
y_test = X_test.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n_test,))
ns = [100,200,500,1000,10000] #number of samples per task
k = 500 # number of tasks
rs = [int(n+4*i) for i in range(0,int((p-n+4)/4))] # Subspace dimensions
ensure_level = 1 # Level of doing same experiments over and over again and taking average
error, error_opt, error_opt_B_sqrt = np.zeros(shape=(len(ns),len(rs))), np.zeros(shape=(len(ns),len(rs))), np.zeros(shape=(len(ns),len(rs))) # Errors for the 3 cases: 1st no shaping, 2nd shaping with knowledge of \beta^*, 3rd shaping with knowledge of B matrix
error_theoretic, error_B_sqrt_theoretic = np.zeros(shape=(len(ns),len(rs))), np.zeros(shape=(len(ns),len(rs))) # theoretical values for the error (we will not use them in the paper)
for _ in range(ensure_level):
# beta_star = np.sqrt(B).dot(np.random.normal(loc=0, scale=1,size=(p,))) # New task vector
# X = np.random.normal(loc=0, scale=1, size=(n_truth,p))
# y = X.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n_truth,))
# X_test = np.random.normal(loc=0, scale=1, size=(n_test,p))
# y_test = X_test.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n_test,))
for z in range(len(ns)):
nt = ns[z] #number of samples per task
        X_meta = np.random.normal(loc=0,scale=1,size=(k, nt, p)) # meta-train data; k = number of tasks, p is the dimension
beta_meta = np.random.normal(loc=0,scale=1,size=(k,p)) # taks vectors
y_meta = np.ones(shape=(k,nt))
for j in range(k):
beta_meta[j,:] = np.sqrt(B).dot(beta_meta[j,:].T) # shaping task vectors
for j in range(k):
for i in range(nt):
y_meta[j,i] = X_meta[j,i,:].dot(beta_meta[j,:].T) + np.sqrt(sigma_square)*np.random.normal(loc=0,scale=1) #label generation
# MOM
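        # Why this estimates B: with x ~ N(0, I), E[y * x] = beta_j for task
        # j, so `avg` below is a noisy estimate of beta_j; averaging the outer
        # products avg * avg^T over the k tasks then targets
        # E[beta beta^T] = B, up to an O(1/nt) finite-sample bias term.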
B_hat = np.zeros(shape=(p,p))
for j in range(k):
avg = np.zeros(shape=(p,))
for i in range(nt): # Method of Moment estimator as described
avg += y_meta[j,i]*X_meta[j,i,:]/(nt)
B_hat += np.outer(avg,avg)
# PSD Projection of B matrix
B_averaged = B_hat/(k)
print(nt)
print(B)
print(B_averaged)
# e_values,e_vectors = npl.eig(B_averaged)
# e_values_new = np.maximum(e_values,0)
# B_averaged = e_vectors.dot(np.diag(e_values_new).dot(npl.inv(e_vectors)))
diagonal = np.diag(B_averaged)
beta_B_sqrt = np.sqrt(diagonal) # Getting estimated beta vector as described in the paper
S = np.linalg.svd(B_averaged)[0]
for i in range(len(rs)):
r = rs[i] # Subspace dimension
X_r = X.dot(S[:, :r]) # Projection of the data onto the subspace
beta_hat = np.linalg.lstsq(X_r, y, rcond=None)[0]
error_cur = (np.linalg.norm(X_test.dot(S[:, :r]).dot(beta_hat) - y_test)) ** 2 / n_test
error_cur_n = (np.linalg.norm(X_test.dot(S[:, :r]).dot(beta_hat) - y_test)) ** 2 / (np.linalg.norm(y_test) ** 2) # test Error with no shaping
error[z,i] += error_cur_n
zeta = ((n / r)) * np.ones(shape=(r,))
diagonal = np.diag(B_averaged)
beta_B_sqrt = np.sqrt(diagonal)
sigma_square_r = sigma_square + np.linalg.norm(beta_star.T.dot(S[:, r:])) ** 2
error_theoretic_cur = sigma_square_r + r * ((n / r) * (
np.linalg.norm(np.multiply(beta_star.T.dot(S[:, :r]), 1 - zeta)) ** 2) + sigma_square_r / r * np.linalg.norm(zeta) ** 2) / (
n - np.linalg.norm(zeta) ** 2)
error_theoretic_cur_n = error_theoretic_cur / (np.trace(B)+sigma_square)
error_theoretic[z,i] += error_theoretic_cur_n
A = optimal_representation_matrix(beta_B_sqrt.T.dot(S[:, :r]), r, n,
sigma_square + np.linalg.norm(beta_star.T.dot(S[:, r:])) ** 2,
epsilon,beta_star.T.dot(S[:, :r])) # finding optimal shaping matrix
lambda_mat = A.lambdas() # optimal shaping matrix
X_r_opt = X_r.dot(lambda_mat) # data after shaping
beta_hat = (np.linalg.pinv(X_r.dot(lambda_mat))).dot(y)
error_opt_B_sqrt[z,i] += (np.linalg.norm(X_test.dot(S[:, :r]).dot(lambda_mat).dot(beta_hat) - y_test)) ** 2 / (
np.linalg.norm(y_test) ** 2) # test error with shaping
error_B_sqrt_theoretic[z,i] += A.expected_error / (np.trace(B)+sigma_square)
error, error_theoretic, error_opt_B_sqrt, error_B_sqrt_theoretic = error / ensure_level, error_theoretic / ensure_level, error_opt_B_sqrt / ensure_level, error_B_sqrt_theoretic / ensure_level # Averaging over ensure_level
# Doing same projection and shaping, then calculating the error. But for the perfect covariance knowledge B. So, no use of MoM
perfect_subspace_error = np.zeros(shape=(len(rs),))
perfect_subspace_error_theoric = np.zeros(shape=(len(rs),))
ps_error, ps_error_opt, ps_error_opt_B_sqrt = np.zeros(shape=(len(rs),)), np.zeros(shape=(len(rs),)), np.zeros(shape=(len(rs),)) # Errors for the 3 cases: 1st no shaping, 2nd shaping with knowledge of \beta^*, 3rd shaping with knowledge of B matrix
ps_error_theoretic, ps_error_B_sqrt_theoretic = np.zeros(shape=(len(rs),)), np.zeros(shape=(len(rs),)) # theoretical values for the error (we will not use them in the paper)
for _ in range(ensure_level):
# beta_star = np.sqrt(B).dot(np.random.normal(loc=0, scale=1,size=(p,))) # New task vector
# X = np.random.normal(loc=0, scale=1, size=(n_truth,p))
# y = X.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n_truth,))
# X_test = np.random.normal(loc=0, scale=1, size=(n_test,p))
# y_test = X_test.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n_test,))
S = np.linalg.svd(B)[0]
for i in range(len(rs)):
r = rs[i] # Subspace dimension
X_r = X.dot(S[:, :r]) # Projection of the data onto the subspace
        diagonal = np.diag(B)
        beta_B_sqrt = np.sqrt(diagonal)
        zeta = ((n / r)) * np.ones(shape=(r,))  # zeta for this subspace dimension r
        sigma_square_r = sigma_square + np.linalg.norm(beta_B_sqrt.T.dot(S[:, r:])) ** 2
        error_theoretic_cur = sigma_square_r + r * ((n / r) * (
                np.linalg.norm(np.multiply(beta_B_sqrt.T.dot(S[:, :r]), 1 - zeta)) ** 2) + sigma_square_r / r * np.linalg.norm(zeta) ** 2) / (
                                      n - np.linalg.norm(zeta) ** 2)
error_theoretic_cur_n = error_theoretic_cur / (np.linalg.norm(y_test) ** 2 / n_test)
ps_error_theoretic[i] += error_theoretic_cur_n
A = optimal_representation_matrix(beta_B_sqrt.T.dot(S[:, :r]), r, n,
sigma_square + np.linalg.norm(beta_star.T.dot(S[:, r:])) ** 2,
epsilon,beta_star.T.dot(S[:, :r])) # finding optimal shaping matrix
lambda_mat = A.lambdas() # optimal shaping matrix
X_r_opt = X_r.dot(lambda_mat) # data after shaping
beta_hat = (np.linalg.pinv(X_r.dot(lambda_mat))).dot(y)
ps_error_opt_B_sqrt[i] += (np.linalg.norm(X_test.dot(S[:, :r]).dot(lambda_mat).dot(beta_hat) - y_test)) ** 2 / (
np.linalg.norm(y_test) ** 2) # test error with shaping
ps_error_B_sqrt_theoretic[i] += A.expected_error / (np.trace(B)+sigma_square)
perfect_subspace_error, perfect_subspace_error_theoric = perfect_subspace_error/ensure_level, perfect_subspace_error_theoric/ensure_level
ps_error, ps_error_theoretic, ps_error_opt_B_sqrt, ps_error_B_sqrt_theoretic = ps_error / ensure_level, ps_error_theoretic / ensure_level, ps_error_opt_B_sqrt / ensure_level, ps_error_B_sqrt_theoretic / ensure_level # Averaging over ensure_level
# PLOTTING FOR THE OVERPARAMETERIZED CASE
plt.plot(rs, error_B_sqrt_theoretic[0,:],color='b',marker='*')
plt.plot(rs, error_B_sqrt_theoretic[1,:],color='g',marker='o')
plt.plot(rs, error_B_sqrt_theoretic[2,:],color='r',marker='d')
plt.plot(rs, error_B_sqrt_theoretic[3,:],color='k',marker='v')
plt.plot(rs, error_B_sqrt_theoretic[4,:],color='b',marker='v')
plt.plot(rs, ps_error_B_sqrt_theoretic,color='y',marker='^')
plt.axis(ymin=0,ymax=2.5)
plt.xlabel('Representation Dimension', fontsize=20)
plt.ylabel('Few-Shot Test Error', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend([r'$n_{spt}=$'+str(ns[0]), r'$n_{spt}=$'+str(ns[1]), r'$n_{spt}=$'+str(ns[2]), r'$n_{spt}=$'+str(ns[3]), r'$n_{spt}=$'+str(ns[4]), 'perfect covariance'],loc='lower left')
plt.grid(True)
plt.show()
def computeLeftError(bSi, B, sigma_square, p, n, R):
    # bSi and B are vectors
gamma = p/n
theta = R/p
idx_trun = int(p * theta)
trun_covs = B * bSi
exp_term_1 = np.sum(trun_covs[idx_trun:])
err = (exp_term_1 + sigma_square) / (1- theta * gamma)
return err
def computeLeftError2(bSi, B, S_hat, sigma_square, p, n, R):
# bsi and B are vectors
# B is the true B, not send B_average here
# make it back to matrix
Canonical = np.diag(bSi*B)
# S_hat is p*R
# does this return eigenspace? I'm assuming it's p*R
U = S_hat
Residual = Canonical - U.dot(U.T).dot(Canonical) - Canonical.dot(U).dot(U.T) + U.dot(U.T).dot(Canonical).dot(U).dot(U.T)
gamma = p/n
theta = R/p
exp_term1 = np.trace(Residual)
err = (exp_term1 + sigma_square) / (1- theta * gamma)
return err
##UNDERPARAMETERIZED REGIME FOR ESTIMATED COVARIANCES
# Here we are doing the same thing but for the underparameterized case, namely r < n_fs. So, when solving the problem we do classical linear regression
# ensure_level = 3
rs1 = [int(4 * i) for i in range(1, int(n / 4))]
all_errors1 = np.zeros(shape=(len(ns), len(rs1)))
error_left_side_theoric = np.zeros(shape=(len(ns), len(rs1)))
error_left_side_experimental = np.zeros(shape=(len(ns), len(rs1)))
for _ in range(ensure_level):
beta_star = np.sqrt(B).dot(np.random.normal(loc=0, scale=1, size=(p,))) # New task vector
X = np.random.normal(loc=0, scale=1, size=(n_truth, p))
y = X.dot(beta_star) + np.random.normal(loc=0, scale=np.sqrt(sigma_square), size=(n_truth,))
X_test = np.random.normal(loc=0, scale=1, size=(n_test, p))
y_test = X_test.dot(beta_star) + np.random.normal(loc=0, scale=np.sqrt(sigma_square), size=(n_test,))
for z in range(len(ns)):
nt = ns[z]
X_meta = np.random.normal(loc=0, scale=1, size=(k, nt, p))
beta_meta = np.random.normal(loc=0, scale=1, size=(k, p))
y_meta = np.ones(shape=(k, nt))
for j in range(k):
beta_meta[j, :] = np.sqrt(B).dot(beta_meta[j, :].T)
for j in range(k):
for i in range(nt):
y_meta[j, i] = X_meta[j, i, :].dot(beta_meta[j, :].T) + np.sqrt(sigma_square) * np.random.normal(loc=0, scale=1)
B_hat = np.zeros(shape=(p, p))
# An option for MoM
for j in range(k):
avg = np.zeros(shape=(p,))
for i in range(nt):
avg += y_meta[j, i] * X_meta[j, i, :] / (nt)
B_hat += np.outer(avg, avg)
B_averaged = B_hat / (k)
# e_values,e_vectors = npl.eig(B_averaged)
# e_values_new = np.maximum(e_values,0)
# B_averaged = e_vectors.dot(np.diag(e_values_new).dot(npl.inv(e_vectors)))
diagonal = np.diag(B_averaged)
beta_B_sqrt = np.sqrt(diagonal)
B_averaged = np.real(B_averaged)
S_hat = np.linalg.svd(B_averaged)[0]
for i in range(len(rs1)):
r = rs1[i]
X_r = X.dot(S_hat[:, :r])
err_left_test_cur = computeLeftError2(np.ones((p,)), np.diag(B), S_hat[:, :r], sigma_square, p, n, r)
err_left_test_cur /= (np.trace(B) + sigma_square)
error_left_side_theoric[z, i] += err_left_test_cur
reg1 = LinearRegression().fit(X_r, y)
error_left_side_experimental[z,i] += (np.linalg.norm(y_test - reg1.predict(X_test.dot(S_hat[:, :r])))) ** 2 / (
np.linalg.norm(y_test) ** 2) # test error for underparameterized case
error_left_side_experimental /= ensure_level
error_left_side_theoric /= ensure_level
##UNDERPARAMETERIZED REGIME FOR PERFECT COVARIANCE
# Here we are doing the same thing but for the underparameterized case, namely r < n_fs. So, when solving the problem we do classical linear regression
# ensure_level = 5
rs1 = [int(4*i) for i in range(1,int(n/4))]
all_errors1 = np.zeros(shape=(len(ns), len(rs1)))
error_left_side_theoric_perfect_subpsace = np.zeros(shape=(len(rs1),))
error_left_side_experimental_perfect_subpsace = np.zeros(shape=(len(rs1),))
for _ in range(ensure_level):
beta_star = np.sqrt(B).dot(np.random.normal(loc=0, scale=1,size=(p,))) # New task vector
X = np.random.normal(loc=0, scale=1, size=(n_truth,p))
y = X.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n_truth,))
X_test = np.random.normal(loc=0, scale=1, size=(n_test,p))
y_test = X_test.dot(beta_star) + np.random.normal(loc=0,scale=np.sqrt(sigma_square),size=(n_test,))
S_hat = np.linalg.svd(B)[0]
for i in range(len(rs1)):
r = rs1[i]
X_r = X.dot(S_hat[:,:r])
err_left_test_cur = computeLeftError2(np.ones((p,)), np.diag(B), S_hat[:,:r], sigma_square, p, n, r)
err_left_test_cur /= (np.trace(B)+sigma_square)
error_left_side_theoric_perfect_subpsace[i] += err_left_test_cur
reg1 = LinearRegression().fit(X_r, y)
error_left_side_experimental_perfect_subpsace[i] += (np.linalg.norm(y_test - reg1.predict(X_test.dot(S_hat[:, :r])))) ** 2 / (
np.linalg.norm(y_test) ** 2) # test error for underparameterized case
all_errors1 = all_errors1/ensure_level
error_left_side_theoric_perfect_subpsace /= ensure_level
error_left_side_experimental_perfect_subpsace /= ensure_level
# PLOTTING FOR THE UNDERPARAMETERIZED CASE
plt.plot(rs1, error_left_side_theoric[0,:],color='b',marker='*')
plt.plot(rs1, error_left_side_theoric[1,:],color='g',marker='o')
plt.plot(rs1, error_left_side_theoric[2,:],color='r',marker='d')
plt.plot(rs1, error_left_side_theoric[3,:],color='k',marker='v')
plt.plot(rs1, error_left_side_theoric_perfect_subpsace,color='y',marker='^')
print(error_left_side_theoric_perfect_subpsace)
plt.axis(ymin=0,ymax=2)
plt.xlabel('Representation Dimension', fontsize=20)
plt.ylabel('Few-Shot Test Error', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.grid(True)
plt.show()
plt.plot(np.concatenate((rs1,rs),axis=0),np.concatenate((error_left_side_theoric[0,:],error_B_sqrt_theoretic[0,:]),axis=0),color='b',marker='*')
plt.plot(np.concatenate((rs1,rs),axis=0),np.concatenate((error_left_side_theoric[1,:],error_B_sqrt_theoretic[1,:]),axis=0),color='g',marker='o')
plt.plot(np.concatenate((rs1,rs),axis=0),np.concatenate((error_left_side_theoric[2,:],error_B_sqrt_theoretic[2,:]),axis=0),color='r',marker='d')
plt.plot(np.concatenate((rs1,rs),axis=0),np.concatenate((error_left_side_theoric[3,:],error_B_sqrt_theoretic[3,:]),axis=0),color='k',marker='v')
plt.plot(np.concatenate((rs1,rs),axis=0),np.concatenate((error_left_side_theoric[4,:],error_B_sqrt_theoretic[4,:]),axis=0),color='m',marker='v')
plt.plot(np.concatenate((rs1,rs),axis=0),np.concatenate((error_left_side_theoric_perfect_subpsace, ps_error_B_sqrt_theoretic),axis=0),color='y',marker='^')
plt.axis(ymin=0,ymax=2)
plt.xlabel('Representation Dimension', fontsize=20)
plt.ylabel('Few-Shot Test Error', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
# plt.legend([r'$n_{spt}=$'+str(ns[0]), r'$n_{spt}=$'+str(ns[1]),r'$n_{spt}=$'+str(ns[2]),r'$n_{spt}=$'+str(ns[3]),r'$n_{spt}=$'+str(ns[4]),'perfect covariance'],loc='lower left')
plt.grid(True)
plt.show()
x1 = np.concatenate((rs1,rs),axis=0)
x2,x3,x4,x5,x6 = x1,x1,x1,x1,x1
y1 = np.concatenate((error_left_side_theoric[0,:],error_B_sqrt_theoretic[0,:]),axis=0)
y2 = np.concatenate((error_left_side_theoric[1,:],error_B_sqrt_theoretic[1,:]),axis=0)
y3 = np.concatenate((error_left_side_theoric[2,:],error_B_sqrt_theoretic[2,:]),axis=0)
y4 = np.concatenate((error_left_side_theoric[3,:],error_B_sqrt_theoretic[3,:]),axis=0)
y5 = np.concatenate((error_left_side_theoric[4,:],error_B_sqrt_theoretic[4,:]),axis=0)
y6 = np.concatenate((error_left_side_theoric_perfect_subpsace, ps_error_B_sqrt_theoretic),axis=0)
plt.plot(x1,y1,color='b',linewidth=3)
plt.plot(x2,y2,color='g',linewidth=3)
#plt.plot(x3,y3,color='y')
plt.plot(x4,y4,color='k',linewidth=3)
#plt.plot(x5,y5,color='m')
plt.plot(x6,y6,color='r',linewidth=3)
plt.axis(xmin=40,ymin=0,ymax=2)
plt.xlabel('Representation Dimension', fontsize=20)
plt.ylabel('Few-Shot Test Error', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend([r'$n_1=$'+str(ns[0]),r'$n_1=$'+str(ns[1]),r'$n_1=$'+str(ns[3]),'perfect covariance'],loc='upper right',fontsize=12)
plt.tight_layout()
plt.grid(True)
| 22,568 | 50.645309 | 261 |
py
|
Rep-Learning
|
Rep-Learning-main/nips_supp/mom_iota.py
|
from scipy.io import loadmat
import numpy as np
# import torch
# import torch.nn as nn
# import torch.optim as optim
# from torchvision import models
# import torch.utils.data
# from torch.utils import data
# from torch.utils.data import DataLoader, TensorDataset
import scipy
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import shelve
N = 10
pf = .3
pt = .2
rf = int(N*pf)
rt = int(N*pt)
iota = 0.01*np.arange(101)
err = np.zeros((101,))
for idx in range(101):
i = iota[idx]
bSi = np.concatenate( (np.ones((rf,)), i * np.ones((N-rf,))) )
B = np.concatenate( (np.ones((rt,)), i * np.ones((N-rt,))) )
r1 = np.sum(B*bSi)
sf = np.sum(bSi)
st = np.sum(B)
err[idx] = np.sqrt(sf)*r1 + np.sqrt(st)
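    # Added note: err is the quantity plotted below as ||M_hat - M||, built from the
    # spectrum traces sf, st and their overlap r1 = <B, bSi>; only its shape in iota
    # matters, since the curve is normalized by its maximum before plotting.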
#save
plt.plot(iota,err/np.max(err),'b',linewidth = 2)
plt.xlabel(r'$\iota$',fontsize=25)
plt.ylabel(r'$||\hat{\mathbf{M}} - \mathbf{M}||$',fontsize=25)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.tight_layout()
plt.grid(True)
#plt.savefig('align_err_YS2.pdf')
plt.savefig('align_err_YS2.eps', format='eps')
plt.show()
| 1,116 | 23.822222 | 66 |
py
|
Rep-Learning
|
Rep-Learning-main/nips_supp/SVM_MNIST.py
|
import numpy as np
import numpy.linalg as npl
import matplotlib.pyplot as plt
from keras.datasets import mnist
import cvxpy as cp
(train_X, train_y), (test_X, test_y) = mnist.load_data()
train_X=train_X.astype(float)
test_X=test_X.astype(float)
for i in range(len(train_X)):
train_X[i]-=np.mean(train_X[i])
train_X[i]/=np.std(train_X[i])
for i in range(len(test_X)):
test_X[i]-=np.mean(test_X[i])
test_X[i]/=np.std(test_X[i])
trX=train_X.reshape([60000,784])
tsX=test_X.reshape([10000,784])
i1=0
i2=2
MAX=10
NUM=int(MAX*(MAX-1)/2)
ctr=0
BT=np.zeros((NUM,784))
BTC=np.zeros((NUM,784))
COV=np.zeros((NUM,784,784))
batch_sz = 800
for i1 in np.arange(MAX):
for i2 in np.arange(i1+1,MAX):
print(ctr)
ly1=train_y==i1
ly2=train_y==i2
lt1=test_y==i1
lt2=test_y==i2
tr1=trX[ly1]+0.
tr2=trX[ly2]+0.
ts1=tsX[lt1]+0.
ts2=tsX[lt2]+0.
y=-np.ones((np.sum(ly1)+np.sum(ly2>0)))
y[:np.sum(ly1)]=1
yt=-np.ones((np.sum(lt1)+np.sum(lt2>0)))
yt[:np.sum(lt1)]=1
ts12=np.concatenate([ts1,ts2])
tr12=np.concatenate([tr1,tr2])
batch = np.random.choice(tr12.shape[0],batch_sz)
print("batch")
print(batch[:10])
tr_b = tr12[batch,:]
y_b = y[batch]
w = cp.Variable(784)
objective = cp.Minimize(cp.sum_squares(w))
constraints = [1 <= cp.multiply(y_b, cp.matmul(tr_b,w))]
#objective = cp.Minimize(cp.sum_squares(w) + 0.1 * cp.sum(cp.pos(1 - cp.multiply(y_b, cp.matmul(tr_b,w)))))
#constraints = []
prob = cp.Problem(objective, constraints)
#result = prob.solve()
result = prob.solve(solver=cp.CVXOPT,abstol = 1e-4)
#result = prob.solve(solver=cp.CBC,maximumSeconds = 10)
bt = w.value
#print(cp.matmul(tr_b,w.value).shape)
#print(y_b.shape)
#print(cp.multiply(y_b, cp.matmul(tr_b,w.value)).value)
#ytest = y*cp.matmul(tr12,w)
#print(ytest.shape)
#bt = np.mean(ts1, axis=0) - np.mean(ts2, axis=0)
#bt=npl.pinv(tr12).dot(y)
COV12=ts12.T.dot(ts12)/len(ts12)
COV[ctr]=COV12
V,EIG,_=npl.svd(COV12)
SQ=V.dot(np.diag(np.sqrt(EIG)).dot(V.T))
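        # SQ = V diag(sqrt(EIG)) V^T: symmetric square root of the test covariance COV12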
BT[ctr]=bt
btc=SQ.dot(bt)
BTC[ctr]=btc
ctr+=1
np.save('BT',BT)
np.save('COV',COV)
for i in range(NUM):
BT[i,:]/=npl.norm(BT[i,:])
BTC[i,:]/=npl.norm(BTC[i,:])
COV_BT=BTC.T.dot(BTC)/NUM
COV_F=np.sum(COV,0)/NUM
EIG_BT=npl.eig(COV_BT)[0]
EIG_F=npl.eig(COV_F)[0]
V,EIG_BT,_=npl.svd(COV_BT)
SQ=V.dot(np.diag(np.sqrt(EIG_BT)).dot(V.T))
PROD=SQ.dot(COV_F).dot(SQ.T)
ID=np.identity(784)
PRODID=SQ.dot(ID).dot(SQ.T)
print('Identity alignment',np.sum(np.real(npl.eig(PRODID)[0]))/npl.norm(EIG_BT)/np.sqrt(784))
print('Canonical alignment',np.sum(np.real(npl.eig(PROD)[0]))/npl.norm(EIG_F)/npl.norm(EIG_BT))
COV_BT=BT.T.dot(BT)/NUM
COV_F=np.sum(COV,0)/NUM
EIG_BT=npl.eig(COV_BT)[0]
EIG_F=npl.eig(COV_F)[0]
V,EIG_BT,_=npl.svd(COV_BT)
SQ=V.dot(np.diag(np.sqrt(EIG_BT)).dot(V.T))
PROD=SQ.dot(COV_F).dot(SQ.T)
ID=np.identity(784)
PRODID=SQ.dot(ID).dot(SQ.T)
print('Identity alignment',np.sum(np.real(npl.eig(PRODID)[0]))/npl.norm(EIG_BT)/np.sqrt(784))
print('Beta alignment',np.sum(np.real(npl.eig(PROD)[0]))/npl.norm(EIG_F)/npl.norm(EIG_BT))
| 3,339 | 28.298246 | 115 |
py
|
Rep-Learning
|
Rep-Learning-main/nips_supp/Optimal_weighting.py
|
from scipy.io import loadmat
import numpy as np
import scipy
from scipy.optimize import minimize
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
def computeLeftError(bSi, B, sigma_square, p, n, R):
# bsi and B are vectors
gamma = p/n
theta = R/p
idx_trun = int(p * theta)
trun_covs = B * bSi
exp_term_1 = np.sum(trun_covs[idx_trun:])
err = (exp_term_1 + sigma_square) / (1- theta * gamma)
return err
def computeLeftError2(bSi, B, S_hat, sigma_square, p, n, R):
# bsi and B are vectors
# B is the true B, not send B_average here
# make it back to matrix
Canonical = np.diag(bSi*B)
U = S_hat
Residual = Canonical - U.dot(U.T).dot(Canonical) - Canonical.dot(U).dot(U.T) + U.dot(U.T).dot(Canonical).dot(U).dot(U.T)
gamma = p/n
theta = R/p
exp_term1 = np.trace(Residual)
err = (exp_term1 + sigma_square) / (1- theta * gamma)
return err
class optimal_representation_matrix:
def __init__(self, beta, p, n, sigma_square, epsilon):
self.beta = beta # beta vector in the non-asymptotic optimal representation part
self.p = p # p is dimension of the features
self.n = n # Number of training samples
self.sigma_square = sigma_square # Noise variance
        self.epsilon = epsilon # small margin keeping the optimizer strictly below zeta_i = 1 (not strictly necessary, but provides early stopping at a set tolerance)
self.expected_error = 0
self.risk_test = 0
self.risk_test_no_shaping = 0
def objective_func(self, zeta): # Objective func to minimize, zeta refers to theta in the paper
return self.sigma_square + self.p * ((self.n / self.p) * (
np.linalg.norm(np.multiply(self.beta, 1 - zeta)) ** 2) + self.sigma_square / self.p * np.linalg.norm(
zeta) ** 2) / (
n - np.linalg.norm(zeta) ** 2)
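    # Added annotation: objective_func evaluates the predictive risk
    #   sigma^2 + p * [ (n/p) * ||beta * (1 - zeta)||^2 + (sigma^2/p) * ||zeta||^2 ] / (n - ||zeta||^2),
    # a bias term from shrinking coordinate i by (1 - zeta_i) plus a variance term;
    # zeta plays the role of theta in the paper.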
'''
Compute ksi by binary search
'''
def func_ksi(self, B, ksi):
ksi_B = ksi * B
ksi_B_frac = ksi_B / (1 + ksi_B)
return np.sum(ksi_B_frac)
def ksi_solver(self, n, p, B, lower_bd, upper_bd, ksi_iters):
left = lower_bd
right = upper_bd
while self.func_ksi(B, right) < n:
left = right
right *= 2
T = ksi_iters
for i in range(T):
mid = (left + right) / 2
n_mid = self.func_ksi(B, mid)
if n_mid > n:
right = mid
else:
left = mid
return mid
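    # Hypothetical usage of the binary search above (illustrative values only):
    #   solver = optimal_representation_matrix(beta, p=100, n=40, sigma_square=0.05, epsilon=1e-4)
    #   ksi = solver.ksi_solver(n=40, p=100, B=np.ones(100), lower_bd=0, upper_bd=10, ksi_iters=100)
    # It returns ksi with sum_i ksi * B_i / (1 + ksi * B_i) = n, the fixed point used
    # by the random-matrix risk formulas in lambdas() below.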
'''
Above: Compute ksi by binary search
'''
def lambdas(self):
zeta = np.random.uniform(0, 1, size=(self.p,))
cons = ({'type': 'eq',
'fun': lambda x: np.sum(x) - self.n})
res = minimize(self.objective_func, zeta, constraints=cons,
bounds=[(0, 1 - self.epsilon) for _ in range(self.p)],
# Optimization algorithm over theta in the paper (we call it zeta here)
method='trust-constr', options={'disp': True, 'maxiter': 3000})
self.expected_error = self.objective_func(res.x)
lambda_vector = np.zeros(shape=(self.p,))
for i in range(self.p):
ksi = 1
# lambda_vector[i] = res.x[i] / (ksi * (1 - res.x[i]))
lambda_vector[i] = res.x[i] / (ksi * (1 - res.x[i]))
lambda_sqrt_vector = np.sqrt(lambda_vector)
lambda_matrix = np.diag(lambda_sqrt_vector)
'''
Check risk
'''
ksi = self.ksi_solver(self.n, self.p, lambda_vector, 0, 10, 100)
gamma = (sigma_square / self.p + np.mean(self.beta ** 2 * lambda_vector / (1 + ksi * lambda_vector) ** 2)) \
/ (np.mean(1 / (1 + 1 / (ksi * lambda_vector)) ** 2) * self.p / self.n)
risk_test_1 = np.mean(lambda_vector * self.beta ** 2 / (1 + ksi * lambda_vector) ** 2)
risk_test_2 = self.p / self.n * gamma * np.mean((1 + 1 / (lambda_vector)) ** (-2))
risk_test = risk_test_1 + risk_test_2
risk_test *= self.p
risk_test += sigma_square
self.risk_test = risk_test
## No-Shaping Theoretical
lambda_vector = np.ones(shape=(self.p,))
ksi = self.ksi_solver(self.n, self.p, lambda_vector, 0, 10, 100)
gamma = (sigma_square / self.p + np.mean(self.beta ** 2 * lambda_vector / (1 + ksi * lambda_vector) ** 2)) \
/ (np.mean(1 / (1 + 1 / (ksi * lambda_vector)) ** 2) * self.p / self.n)
risk_test_1 = np.mean(lambda_vector * self.beta ** 2 / (1 + ksi * lambda_vector) ** 2)
risk_test_2 = self.p / self.n * gamma * np.mean((1 + 1 / (lambda_vector)) ** (-2))
risk_test = risk_test_1 + risk_test_2
risk_test *= self.p
risk_test += sigma_square
self.risk_test_no_shaping = risk_test
## End of No-Shaping Theoretical
return lambda_matrix # Returns the optimal shaping matrix
K = 1
p, n, nb_iteration, ksi, sigma_square, epsilon, n_test = 100, 40, 3, 1, 0.05, 0.0001, 2000 # p = feature dimension, n = number of training samples for the new task, ksi = 1 (it can be chosen arbitrarily), sigma_square = noise variance,
# epsilon = tolerance in the optimization, n_test = number of test samples for the new task
Constant, s = 25, int(2 * p / 10) # Constant=big eigenvalues; s = effective rank
iota = 0.1
B = np.diag(np.concatenate((1 * np.ones(shape=(s,)), # Covariance of task vectors
iota * np.ones(shape=(p-s,))), axis=0))
rs = [int(n + 4 * i) for i in
range(int((p - n + 4) / 4))] # Subspace representation dimension for overparameterized case
error, error_opt, error_opt_B_sqrt = np.zeros(shape=(len(rs),)), np.zeros(shape=(len(rs),)), np.zeros(shape=(len(
rs),)) # Errors for the 3 cases : 1sr no shaping,2nd shaping with knowledge of \beta^*, 3rd shaping with knowledge of B matrix
error_theoretic, error_B_sqrt_theoretic = np.zeros(shape=(len(rs),)), np.zeros(
shape=(len(rs),)) # theoretical values for the error (we will not use them in the paper)
ensure_level = 5
for _ in range(ensure_level):
beta = np.sqrt(B).dot(np.random.normal(loc=0, scale=1, size=(p,))) # task vector generation
X = np.random.normal(loc=0, scale=1, size=(n, p)) # Data samples generation
y = X.dot(beta) + np.random.normal(loc=0, scale=np.sqrt(sigma_square), size=(n,)) # Label generation
X_test = np.random.normal(loc=0, scale=1, size=(n_test, p)) # Data samples generation
y_test = X_test.dot(beta) + np.random.normal(loc=0, scale=np.sqrt(sigma_square), size=(n_test,)) # Label generation
diagonal = np.diag(B)
beta_B_sqrt = np.sqrt(
diagonal) # we will do the shaping as if the task vector is square roots of the diagonal values of the B matrix
S = np.linalg.svd(B)[0]
# beta = np.sqrt(B)
# for i in range(len(rs) - 1, len(rs)):
for i in range(len(rs)):
r = rs[len(rs) - 1 - i] # Subspace dimension
X_r = X.dot(S[:, :r]) # Subspace projection of the data
diagonal = np.diag(B)
beta_B_sqrt = np.sqrt(diagonal)
# beta_hat = np.linalg.pinv(X_r).dot(y)
beta_hat = np.linalg.lstsq(X_r, y, rcond=None)[0]
error_cur = (np.linalg.norm(X_test.dot(S[:, :r]).dot(beta_hat) - y_test)) ** 2 / n_test
error_cur_n = (np.linalg.norm(X_test.dot(S[:, :r]).dot(beta_hat) - y_test)) ** 2 / (
np.linalg.norm(y_test) ** 2) # test Error with no shaping
error[i] += error_cur_n
zeta = ((n / r)) * np.ones(shape=(r,))
diagonal = np.diag(B)
beta_B_sqrt = np.sqrt(diagonal)
sigma_square_r = sigma_square + np.linalg.norm(beta_B_sqrt.T.dot(S[:, r:])) ** 2
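        # Added note: task-vector energy outside the kept subspace acts as extra label
        # noise, which is why sigma_square_r grows by ||beta^T S[:, r:]||^2.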
error_theoretic_cur = sigma_square_r + r * ((n / r) * (
np.linalg.norm(np.multiply(beta_B_sqrt.T.dot(S[:, :r]), 1 - zeta)) ** 2) + sigma_square_r / r * np.linalg.norm(zeta) ** 2) / (
n - np.linalg.norm(zeta) ** 2)
error_theoretic_cur_n = error_theoretic_cur / (np.linalg.norm(y_test) ** 2 / n_test)
error_theoretic[i] += error_theoretic_cur_n
##error with shaping
#diagonal = np.diag(B)
#beta_B_sqrt = np.sqrt(diagonal)
A = optimal_representation_matrix(beta_B_sqrt.T.dot(S[:, :r]), r, n,
sigma_square + np.linalg.norm(beta_B_sqrt.T.dot(S[:, r:])) ** 2,
epsilon) # finding optimal shaping matrix
lambda_mat = A.lambdas() # optimal shaping matrix
# lambda_mat = lambda_mat.dot(lambda_mat).dot(lambda_mat).dot(lambda_mat).dot(lambda_mat).dot(lambda_mat)
# error_theoretic[i] += A.risk_test_no_shaping / (np.linalg.norm(y_test) ** 2 / n_test)
X_r_opt = X_r.dot(lambda_mat) # data after shaping
beta_hat = (np.linalg.pinv(X_r.dot(lambda_mat))).dot(y)
error_opt_B_sqrt[i] += (np.linalg.norm(X_test.dot(S[:, :r]).dot(lambda_mat).dot(beta_hat) - y_test)) ** 2 / (
np.linalg.norm(y_test) ** 2) # test error with shaping
error_B_sqrt_theoretic[i] += A.expected_error / (np.linalg.norm(y_test) ** 2 / n_test)
error, error_theoretic, error_opt_B_sqrt, error_B_sqrt_theoretic = error / ensure_level, error_theoretic / ensure_level, error_opt_B_sqrt / ensure_level, error_B_sqrt_theoretic / ensure_level # Averaging over ensure_level
## UNDERPARAMETERIZED CASE
# Same error computation as above, but in the underparameterized case, where shaping
# is meaningless; each subspace regression is solved with a linear-regression solver.
rs1 = [int(2 * i) for i in range(2, int(n / 2))]
S = np.linalg.svd(B)[0]
error1 = np.zeros(shape=(len(rs1),)) # error for underparameterized case
error_intuitive_left_side = np.zeros(shape=(len(rs1),))
error_intuitive_left_side_2 = np.zeros(shape=(len(rs1),))
ensure_level = 80
for _ in range(ensure_level):
# for i in range(len(rs1) - 6, len(rs1)):
beta = np.sqrt(B).dot(np.random.normal(loc=0, scale=1, size=(p,)))
for i in range(len(rs1)):
X = np.random.normal(loc=0, scale=1, size=(n, p)) # data generation
y = X.dot(beta) + np.random.normal(loc=0, scale=np.sqrt(sigma_square), size=(n,)) # label generation
X_test = np.random.normal(loc=0, scale=1, size=(n_test, p)) # test data generation
y_test = X_test.dot(beta) + np.random.normal(loc=0, scale=np.sqrt(sigma_square),
size=(n_test,)) # test label generation
r = rs1[i]
X_r = X.dot(S[:, :r])
# lambda1 = np.diag(np.random.uniform(low=0.001,high=5,size=(r,)))
# X_r_1= X_r.dot(lambda1)
        theta_for_left_side = np.concatenate((np.ones(shape=(r,)),
                                              ((n - r) / (p - r)) * np.ones(shape=(p - r,))), axis=0)  # per-coordinate shrinkage factors theta
error_intuitive_left_side[i] += ((n/p)*np.linalg.norm(beta*(1-theta_for_left_side))**2+sigma_square*np.linalg.norm(theta_for_left_side)**2)/((n-np.linalg.norm(theta_for_left_side)**2))
# err_left_test_cur = computeLeftError(np.ones((p,)), np.diag(B), sigma_square, p, n, r)
err_left_test_cur = computeLeftError2(np.ones((p,)), np.diag(B), S[:,:r],sigma_square, p, n, r)
err_left_test_cur /= (np.linalg.norm(y_test) ** 2 / n_test)
error_intuitive_left_side_2[i] += err_left_test_cur
reg1 = LinearRegression().fit(X_r, y)
error1[i] += (np.linalg.norm(y_test - reg1.predict(X_test.dot(S[:, :r])))) ** 2 / (
np.linalg.norm(y_test) ** 2) # test error for underparameterized case
error1 = error1 / ensure_level # averaging
error_intuitive_left_side = error_intuitive_left_side / ensure_level
error_intuitive_left_side_2 = error_intuitive_left_side_2 / ensure_level
# PLOTTING EXPERIMENTAL ERRORS
plt.figure(figsize=(8, 6))
plt.plot(np.concatenate((rs1,rs),axis=0), np.concatenate((error1,np.flip(error)),axis=0),color='r',marker='o',linewidth=0)
plt.plot(rs, np.flip(error_opt_B_sqrt),color='b',marker='o',linewidth=0)
plt.plot(rs[1:], (np.flip(error_theoretic))[1:],color='r')
plt.plot(rs[1:], (np.flip(error_B_sqrt_theoretic))[1:],color='b')
plt.plot(rs1,error_intuitive_left_side_2,color='r')
plt.axis(ymin=0,ymax=2)
plt.xlabel('Representation Dimension', fontsize=20)
plt.ylabel('Few-Shot Test Error', fontsize=25)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.legend(['no shaping experimental','optimal shaping experimental','no shaping theoretical','optimal shaping theoretical'], loc='upper right',fontsize=14)
plt.tight_layout()
plt.grid(True)
plt.show()
| 12764 | 47.721374 | 225 |
py
|
FORK
|
FORK-master/TD3-FORK/TD3_FORK.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class Sys_R(nn.Module):
def __init__(self,state_dim, action_dim, fc1_units, fc2_units):
super(Sys_R, self).__init__()
        # reward model architecture
self.l1 = nn.Linear(2 * state_dim + action_dim, fc1_units)
self.l2 = nn.Linear(fc1_units,fc2_units)
self.l3 = nn.Linear(fc2_units, 1)
def forward(self, state,next_state, action):
sa = torch.cat([state,next_state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class SysModel(nn.Module):
def __init__(self, state_size, action_size, fc1_units, fc2_units):
super(SysModel, self).__init__()
self.l1 = nn.Linear(state_size + action_size, fc1_units)
self.l2 = nn.Linear(fc1_units, fc2_units)
self.l3 = nn.Linear(fc2_units, state_size)
def forward(self, state, action):
"""Build a system model to predict the next state at a given state."""
xa = torch.cat([state, action], 1)
x1 = F.relu(self.l1(xa))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
return x1
class TD3_FORK(object):
def __init__(
self,
env,
policy,
state_dim,
action_dim,
max_action,
sys1_units = 400,
sys2_units = 300,
r1_units = 256,
r2_units = 256,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2,
sys_weight = 0.5,
sys_weight2 = 0.4,
sys_threshold = 0.020,
):
self.env = env
self.policy = policy
self.actor = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic = Critic(state_dim, action_dim).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
self.sysmodel = SysModel(state_dim, action_dim, sys1_units,sys2_units).to(device)
self.sysmodel_optimizer = torch.optim.Adam(self.sysmodel.parameters(), lr=3e-4)
self.sysmodel.apply(self.init_weights)
self.sysr = Sys_R(state_dim, action_dim, r1_units, r2_units).to(device)
self.sysr_optimizer = torch.optim.Adam(self.sysr.parameters(), lr=3e-4)
self.obs_upper_bound = float(self.env.observation_space.high[0]) #state space upper bound
self.obs_lower_bound = float(self.env.observation_space.low[0]) #state space lower bound
self.reward_lower_bound = 0
self.reward_upper_bound = 0
if self.obs_upper_bound == float('inf'):
self.obs_upper_bound,self.obs_lower_bound = 0,0
self.sysmodel_loss = 0
self.sysr_loss = 0
self.max_action = max_action
self.discount = discount
self.tau = tau
self.policy_noise = policy_noise
self.noise_clip = noise_clip
self.policy_freq = policy_freq
self.sys_weight = sys_weight
self.sys_weight2 = sys_weight2
self.sys_threshold = sys_threshold
        self.total_it = 0
        self.update_sys = 0  # counts actor updates that actually used the system model
def init_weights(self,m):
if type(m) == nn.Linear:
torch.nn.init.xavier_uniform_(m.weight)
m.bias.data.fill_(0.001)
def select_action(self, state):
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
return self.actor(state).cpu().data.numpy().flatten()
def train(self, replay_buffer, batch_size=100,train_steps=1):
for _ in range(train_steps):
self.total_it += 1
# Sample replay buffer
state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (
self.actor_target(next_state) + noise
).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
#Train system and reward model
predict_next_state = self.sysmodel(state, action)
predict_next_state = predict_next_state.clamp(self.obs_lower_bound,self.obs_upper_bound)
sysmodel_loss = F.smooth_l1_loss(predict_next_state, next_state.detach())
self.sysmodel_optimizer.zero_grad()
sysmodel_loss.backward()
self.sysmodel_optimizer.step()
self.sysmodel_loss = sysmodel_loss.item()
predict_reward = self.sysr(state,next_state,action)
sysr_loss = F.mse_loss(predict_reward, reward.detach())
self.sysr_optimizer.zero_grad()
sysr_loss.backward()
self.sysr_optimizer.step()
self.sysr_loss = sysr_loss.item()
s_flag = 1 if sysmodel_loss.item() < self.sys_threshold else 0
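            # FORK gate (added annotation): the learned system model is trusted for the
            # actor update only while its prediction loss stays below sys_threshold;
            # otherwise the actor falls back to the plain TD3 loss.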
#Delayed policy updates
if self.total_it % self.policy_freq == 0:
                #Compute actor loss
actor_loss1 = -self.critic.Q1(state, self.actor(state)).mean()
if s_flag == 1:
p_next_state = self.sysmodel(state, self.actor(state))
p_next_state = p_next_state.clamp(self.obs_lower_bound,self.obs_upper_bound)
actions2 = self.actor(p_next_state.detach())
if self.policy in ['TD3_FORK_Q','TD3_FORK_Q_F','TD3_FORK_DQ','TD3_FORK_DQ_F']:
actor_loss2 = self.critic.Q1(p_next_state.detach(),actions2)
if self.policy in ['TD3_FORK_DQ','TD3_FORK_DQ_F']:
p_next_state2 = self.sysmodel(p_next_state, self.actor(p_next_state.detach()))
p_next_state2 = p_next_state2.clamp(self.obs_lower_bound,self.obs_upper_bound)
actions3 = self.actor(p_next_state2.detach())
actor_loss22 = self.critic.Q1(p_next_state2.detach(),actions3)
actor_loss3 = - actor_loss2.mean() - self.sys_weight2 * actor_loss22.mean()
else:
actor_loss3 = - actor_loss2.mean()
elif self.policy in ['TD3_FORK_S','TD3_FORK_S_F','TD3_FORK','TD3_FORK_F']:
p_next_r = self.sysr(state,p_next_state.detach(),self.actor(state))
if self.policy in ['TD3_FORK_S','TD3_FORK_S_F']:
actor_loss2 = self.critic.Q1(p_next_state.detach(),actions2)
actor_loss3 = -(p_next_r + self.discount * actor_loss2).mean()
else:
p_next_state2 = self.sysmodel(p_next_state, self.actor(p_next_state.detach()))
p_next_state2 = p_next_state2.clamp(self.obs_lower_bound,self.obs_upper_bound)
p_next_r2 = self.sysr(p_next_state.detach(),p_next_state2.detach(),self.actor(p_next_state.detach()))
actions3 = self.actor(p_next_state2.detach())
actor_loss2 = self.critic.Q1(p_next_state2.detach(),actions3)
actor_loss3 = -(p_next_r + self.discount * p_next_r2 + self.discount ** 2 * actor_loss2).mean()
actor_loss = (actor_loss1 + self.sys_weight * actor_loss3)
self.update_sys += 1
else:
actor_loss = actor_loss1
# Optimize the actor
self.critic_optimizer.zero_grad()
self.sysmodel_optimizer.zero_grad()
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
torch.save(self.actor.state_dict(), filename + "_actor")
torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
torch.save(self.sysmodel.state_dict(), filename + "_sysmodel")
torch.save(self.sysmodel_optimizer.state_dict(), filename + "_sysmodel_optimizer")
torch.save(self.sysr.state_dict(), filename + "_reward_model")
torch.save(self.sysr_optimizer.state_dict(), filename + "_reward_model_optimizer")
    def load(self, filename):
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = copy.deepcopy(self.critic)
        self.actor.load_state_dict(torch.load(filename + "_actor"))
        self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.actor_target = copy.deepcopy(self.actor)
        self.sysmodel.load_state_dict(torch.load(filename + "_sysmodel"))
        self.sysmodel_optimizer.load_state_dict(torch.load(filename + "_sysmodel_optimizer"))
        self.sysr.load_state_dict(torch.load(filename + "_reward_model"))
        self.sysr_optimizer.load_state_dict(torch.load(filename + "_reward_model_optimizer"))
| 10,157 | 31.14557 | 108 |
py
|
FORK
|
FORK-master/TD3-FORK/utils.py
|
import numpy as np
import torch
import math
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.welford_state_n = 1
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def normalize_state(self, states, update=True):
"""
Use Welford's algorithm to normalize a state, and optionally update the statistics
for normalizing states using the new state, online.
"""
states = torch.Tensor(states)
states2 = states.data.clone()
ii = 0
for state in states:
if self.welford_state_n == 1:
self.welford_state_mean = torch.zeros(state.size(-1))
self.welford_state_mean_diff = torch.ones(state.size(-1))
if update:
if len(state.size()) == 1: # if we get a single state vector
state_old = self.welford_state_mean
self.welford_state_mean += (state - state_old) / self.welford_state_n
self.welford_state_mean_diff += (state - state_old) * (state - state_old)
self.welford_state_n += 1
else:
raise RuntimeError # this really should not happen
states2[ii] = (state - self.welford_state_mean) / np.sqrt(self.welford_state_mean_diff / self.welford_state_n)
ii += 1
return states2
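    # Illustrative call (added; not used by default -- sample() below returns raw states):
    #   buf = ReplayBuffer(state_dim=3, action_dim=1)
    #   normed = buf.normalize_state(np.zeros((5, 3)))  # running mean/var updated online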
def add(self, state, action, next_state, reward, done):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
ind = np.random.randint(0,int(self.size), size=batch_size)
return (
torch.FloatTensor(self.state[ind]).to(self.device),
#torch.FloatTensor(self.normalize_state(self.state[ind])).to(self.device),
torch.FloatTensor(self.action[ind]).to(self.device),
torch.FloatTensor(self.next_state[ind]).to(self.device),
torch.FloatTensor(self.reward[ind]).to(self.device),
torch.FloatTensor(self.not_done[ind]).to(self.device)
)
def create_log_gaussian(mean, log_std, t):
    # log N(t; mean, exp(log_std)) summed over the last dimension:
    #   log N = -0.5 * ((t - mean) / sigma)^2 - log(sigma) - 0.5 * log(2 * pi)
    quadratic = -0.5 * ((t - mean) / (log_std.exp())).pow(2)
    l = mean.shape
    log_z = log_std
    z = l[-1] * math.log(2 * math.pi)
    log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
    return log_p
def logsumexp(inputs, dim=None, keepdim=False):
if dim is None:
inputs = inputs.view(-1)
dim = 0
s, _ = torch.max(inputs, dim=dim, keepdim=True)
outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
outputs = outputs.squeeze(dim)
return outputs
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
| 3,165 | 32.680851 | 113 |
py
|
FORK
|
FORK-master/TD3-FORK/TD3.py
|
import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Implementation of Twin Delayed Deep Deterministic Policy Gradients (TD3)
# Paper: https://arxiv.org/abs/1802.09477
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, max_action):
super(Actor, self).__init__()
self.l1 = nn.Linear(state_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, action_dim)
self.max_action = max_action
def forward(self, state):
a = F.relu(self.l1(state))
a = F.relu(self.l2(a))
return self.max_action * torch.tanh(self.l3(a))
class Critic(nn.Module):
def __init__(self, state_dim, action_dim):
super(Critic, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(state_dim + action_dim, 256)
self.l2 = nn.Linear(256, 256)
self.l3 = nn.Linear(256, 1)
# Q2 architecture
self.l4 = nn.Linear(state_dim + action_dim, 256)
self.l5 = nn.Linear(256, 256)
self.l6 = nn.Linear(256, 1)
def forward(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
q2 = F.relu(self.l4(sa))
q2 = F.relu(self.l5(q2))
q2 = self.l6(q2)
return q1, q2
def Q1(self, state, action):
sa = torch.cat([state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class TD3(object):
def __init__(
self,
state_dim,
action_dim,
max_action,
discount=0.99,
tau=0.005,
policy_noise=0.2,
noise_clip=0.5,
policy_freq=2
):
self.actor = Actor(state_dim, action_dim, max_action).to(device)
self.actor_target = copy.deepcopy(self.actor)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4)
self.critic = Critic(state_dim, action_dim).to(device)
self.critic_target = copy.deepcopy(self.critic)
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4)
self.max_action = max_action
self.discount = discount
self.tau = tau
self.policy_noise = policy_noise
self.noise_clip = noise_clip
self.policy_freq = policy_freq
self.total_it = 0
def select_action(self, state):
state = torch.FloatTensor(state.reshape(1, -1)).to(device)
return self.actor(state).cpu().data.numpy().flatten()
def train(self, replay_buffer, batch_size=100):
self.total_it += 1
# Sample replay buffer
state, action, next_state, reward, not_done = replay_buffer.sample(batch_size)
with torch.no_grad():
# Select action according to policy and add clipped noise
noise = (
torch.randn_like(action) * self.policy_noise
).clamp(-self.noise_clip, self.noise_clip)
next_action = (
self.actor_target(next_state) + noise
).clamp(-self.max_action, self.max_action)
# Compute the target Q value
target_Q1, target_Q2 = self.critic_target(next_state, next_action)
target_Q = torch.min(target_Q1, target_Q2)
target_Q = reward + not_done * self.discount * target_Q
# Get current Q estimates
current_Q1, current_Q2 = self.critic(state, action)
# Compute critic loss
critic_loss = F.mse_loss(current_Q1, target_Q) + F.mse_loss(current_Q2, target_Q)
# Optimize the critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# Delayed policy updates
if self.total_it % self.policy_freq == 0:
            # Compute actor loss
actor_loss = -self.critic.Q1(state, self.actor(state)).mean()
# Optimize the actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# Update the frozen target models
for param, target_param in zip(self.critic.parameters(), self.critic_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
for param, target_param in zip(self.actor.parameters(), self.actor_target.parameters()):
target_param.data.copy_(self.tau * param.data + (1 - self.tau) * target_param.data)
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optimizer.state_dict(), filename + "_critic_optimizer")
torch.save(self.actor.state_dict(), filename + "_actor")
torch.save(self.actor_optimizer.state_dict(), filename + "_actor_optimizer")
def load(self, filename):
self.critic.load_state_dict(torch.load(filename + "_critic"))
self.critic_optimizer.load_state_dict(torch.load(filename + "_critic_optimizer"))
self.critic_target = copy.deepcopy(self.critic)
self.actor.load_state_dict(torch.load(filename + "_actor"))
self.actor_optimizer.load_state_dict(torch.load(filename + "_actor_optimizer"))
self.actor_target = copy.deepcopy(self.actor)
| 4,752 | 26.473988 | 93 |
py
|
FORK
|
FORK-master/TD3-FORK/main_td3_fork.py
|
import numpy as np
import torch
import gym
import argparse
import os
import copy
import utils
import TD3
import pandas as pd
import json,os
import TD3_FORK
def eval_policy(policy, env_name,eval_episodes=10):
eval_env = gym.make(env_name)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
action = policy.select_action(np.array(state))
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--policy", default="TD3") # Policy name (TD3,or TD3_FORK,TD3_FORK_Q,TD3_FORK_DQ,TD3_FORK_S)
parser.add_argument("--env", default="HalfCheetah-v2") # OpenAI gym environment name
parser.add_argument("--seed", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds
parser.add_argument("--start_timesteps", default=1e4, type=int) # Time steps initial random policy is used
parser.add_argument("--eval_freq", default=5e3, type=int) # How often (time steps) we evaluate
parser.add_argument("--max_timesteps", default=1e6, type=int) # Max time steps to run environment
parser.add_argument("--expl_noise", default=0.1) # Std of Gaussian exploration noise
parser.add_argument("--batch_size", default=100, type=int) # Batch size for both actor and critic
parser.add_argument("--max_reward", default=100, type=int) # max_reward for dynamic weight
parser.add_argument("--discount", default=0.99) # Discount factor
parser.add_argument("--tau", default=0.005) # Target network update rate
parser.add_argument("--policy_noise", default=0.2,type=float) # Noise added to target policy during critic update
parser.add_argument("--noise_clip", default=0.5,type=float) # Range to clip target policy noise
parser.add_argument("--policy_freq", default=2, type=int) # Frequency of delayed policy updates
parser.add_argument("--sys_neurons1", default=400, type=int) #units of the first layer in system model
parser.add_argument("--sys_neurons2", default=300, type=int) #units of the second layer in system model
parser.add_argument("--r_neurons1", default=256, type=int) #units of the first layer in reward model
parser.add_argument("--r_neurons2", default=256, type=int) #units of the second layer in reward model
parser.add_argument("--save_model", default="False") # Save model and optimizer parameters
parser.add_argument("--load_model", default="") # Model load file name, "" doesn't load, "default" uses file_name
parser.add_argument("--training_mode", default="Online") #training_mode Offline or Online
parser.add_argument("--sys_weight", default=0.5,type=float) # weight for FORK
parser.add_argument("--sys_weight2", default=0.4,type=float) # weight for FORK-DQ
parser.add_argument("--base_weight", default=0.6,type=float) # base weight if using dynamic_weight
parser.add_argument("--sys_threshold", default=0.020,type=float) # threshold for FORK
parser.add_argument("--sys_dynamic_weight", default="False") # whether use dynamic weight or not
args = parser.parse_args()
if args.sys_dynamic_weight == 'False':
args.policy = args.policy + '_F'
file_name = f"{args.policy}_{args.env}_{args.seed}_{args.training_mode}"
print("---------------------------------------")
print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}, Weight: {args.sys_weight},Training_mode: {args.training_mode}, Dynamic_weight: {args.sys_dynamic_weight}")
print("---------------------------------------")
if args.save_model == "True" and not os.path.exists("./models"):
os.makedirs("./models")
env = gym.make(args.env)
# Set seeds
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
state_dim = env.observation_space.shape[0]
state_max = env.observation_space.shape
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"max_action": max_action,
"discount": args.discount,
"tau": args.tau,
}
# Initialize policy
if args.policy == "TD3":
# Target policy smoothing is scaled wrt the action scale
kwargs["policy_noise"] = args.policy_noise * max_action
kwargs["noise_clip"] = args.noise_clip * max_action
kwargs["policy_freq"] = args.policy_freq
policy = TD3.TD3(**kwargs)
variant = dict(
algorithm='TD3',
env=args.env,
)
elif args.policy in ["TD3_FORK","TD3_FORK_F","TD3_FORK_DQ","TD3_FORK_DQ_F","TD3_FORK_Q","TD3_FORK_Q_F","TD3_FORK_S","TD3_FORK_S_F"]:
# Target policy smoothing is scaled wrt the action scale
kwargs["env"] = env
kwargs["policy"] = args.policy
kwargs["policy_noise"] = args.policy_noise * max_action
kwargs["noise_clip"] = args.noise_clip * max_action
kwargs["policy_freq"] = args.policy_freq
kwargs["sys_weight"] = args.sys_weight
kwargs["sys_weight2"] = args.sys_weight2
kwargs["sys_threshold"] = args.sys_threshold
kwargs["sys1_units"] = args.sys_neurons1
kwargs["sys2_units"] = args.sys_neurons2
kwargs["r1_units"] = args.r_neurons1
kwargs["r2_units"] = args.r_neurons2
policy = TD3_FORK.TD3_FORK(**kwargs)
variant = dict(
algorithm=args.policy,
env=args.env,
sys_weight=args.sys_weight,
sys_threshold=args.sys_threshold,
max_reward=args.max_reward,
sys1_units=args.sys_neurons1,
sys2_units=args.sys_neurons2,
r1_units=args.r_neurons1,
r2_units=args.r_neurons2
)
else:
        raise Exception("invalid policy!!!")
if not os.path.exists(f"./data/{args.env}/{args.policy}/seed{args.seed}"):
os.makedirs(f'./data/{args.env}/{args.policy}/seed{args.seed}')
with open(f'./data/{args.env}/{args.policy}/seed{int(args.seed)}/variant.json', 'w') as outfile:
json.dump(variant,outfile)
if args.load_model != "":
policy_file = file_name if args.load_model == "default" else args.load_model
policy.load(f"./models/{policy_file}")
replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
# Evaluate untrained policy
evaluations = [eval_policy(policy, args.env)]
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num = 0
    policy.update_sys = 0 # counts how many actor updates used FORK's system model
ep_reward_list = []
base_weight = args.base_weight
for t in range(int(args.max_timesteps)):
episode_timesteps += 1
# Select action randomly or according to policy
if t < args.start_timesteps:
action = env.action_space.sample()
else:
action = (
policy.select_action(np.array(state))
+ np.random.normal(0, max_action * args.expl_noise, size=action_dim)
).clip(-max_action, max_action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
# Store data in replay buffer
replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
# Store observation and reward bounds
policy.obs_upper_bound = np.amax(state) if policy.obs_upper_bound < np.amax(state) else policy.obs_upper_bound
policy.obs_lower_bound = np.amin(state) if policy.obs_lower_bound > np.amin(state) else policy.obs_lower_bound
policy.reward_lower_bound = (reward) if policy.reward_lower_bound > reward else policy.reward_lower_bound
policy.reward_upper_bound = (reward) if policy.reward_upper_bound < reward else policy.reward_upper_bound
episode_reward += reward
# Train agent after collecting sufficient data
if args.training_mode == 'Online':
if t >= args.start_timesteps:
policy.train(replay_buffer, args.batch_size,train_steps = 1)
if done:
ep_reward_list.append(episode_reward)
if args.sys_dynamic_weight == "True":
policy.sys_weight = np.round((1 - np.clip(np.mean(ep_reward_list[-100:])/args.max_reward, 0, 1)),4) * base_weight
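                # Added annotation: as the 100-episode average return approaches
                # max_reward, sys_weight decays linearly from base_weight to 0, so
                # FORK's model-based actor term fades out once the policy performs well.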
if args.policy in ["TD3_FORK","TD3_FORK_F","TD3_FORK_DQ","TD3_FORK_DQ_F","TD3_FORK_Q","TD3_FORK_Q_F","TD3_FORK_S","TD3_FORK_S_F"]:
print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f} Sysmodel_Loss: {policy.sysmodel_loss} Reward_loss: {policy.sysr_loss} Sys updated times: {policy.update_sys} Sys_weight: {policy.sys_weight}")
policy.update_sys = 0
else:
print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
if args.training_mode == 'Offline':
if t >= args.start_timesteps:
policy.train(replay_buffer, args.batch_size,train_steps = episode_timesteps)
# Reset environment
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num += 1
# Evaluate episode
if (t + 1) % args.eval_freq == 0:
evaluations.append(eval_policy(policy, args.env))
if args.save_model == "True":
policy.save(f"./models/{file_name}")
data = np.array(evaluations)
df = pd.DataFrame(data=data,columns=["Average Return"]).reset_index()
df['Timesteps'] = df['index'] * args.eval_freq
df['env'] = args.env
df['algorithm_name'] = args.policy
df.to_csv(f'./data/{args.env}/{args.policy}/seed{args.seed}/progress.csv', index = False)
| 9,548 | 44.255924 | 258 |
py
|
FORK
|
FORK-master/SAC-FORK/replay_memory.py
|
import random
import numpy as np
class ReplayMemory:
def __init__(self, capacity, seed):
random.seed(seed)
self.capacity = capacity
self.buffer = []
self.position = 0
def push(self, state, action, reward, next_state, done):
if len(self.buffer) < self.capacity:
self.buffer.append(None)
self.buffer[self.position] = (state, action, reward, next_state, done)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
batch = random.sample(self.buffer, batch_size)
state, action, reward, next_state, done = map(np.stack, zip(*batch))
return state, action, reward, next_state, done
def __len__(self):
return len(self.buffer)
| 765 | 30.916667 | 78 |
py
|
FORK
|
FORK-master/SAC-FORK/SAC.py
|
import os
import copy
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, QNetwork, DeterministicPolicy
class SAC(object):
def __init__(self, num_inputs, action_space, args):
self.gamma = args.gamma
self.tau = args.tau
self.alpha = args.alpha
self.policy_type = args.policy_type
self.target_update_interval = args.target_update_interval
self.automatic_entropy_tuning = args.automatic_entropy_tuning
self.device = torch.device("cuda" if args.cuda else "cpu")
self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(self.device)
hard_update(self.critic_target, self.critic)
self.obs_upper_bound,self.obs_lower_bound = 0,0
self.reward_lower_bound,self.reward_upper_bound=0,0
if self.policy_type == "Gaussian":
# Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
if self.automatic_entropy_tuning is True:
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
else:
self.alpha = 0
self.automatic_entropy_tuning = False
self.policy = DeterministicPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
def select_action(self, state, evaluate=False):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if evaluate is False:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
return action.detach().cpu().numpy()[0]
def update_parameters(self, memory, batch_size, updates):
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, mask_batch = memory.sample(batch_size=batch_size)
state_batch = torch.FloatTensor(state_batch).to(self.device)
next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
action_batch = torch.FloatTensor(action_batch).to(self.device)
reward_batch = torch.FloatTensor(reward_batch).to(self.device).unsqueeze(1)
mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
with torch.no_grad():
next_state_action, next_state_log_pi, _ = self.policy.sample(next_state_batch)
qf1_next_target, qf2_next_target = self.critic_target(next_state_batch, next_state_action)
min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi
next_q_value = reward_batch + mask_batch * self.gamma * (min_qf_next_target)
qf1, qf2 = self.critic(state_batch, action_batch) # Two Q-functions to mitigate positive bias in the policy improvement step
qf1_loss = F.mse_loss(qf1, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf2_loss = F.mse_loss(qf2, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf_loss = qf1_loss + qf2_loss
self.critic_optim.zero_grad()
qf_loss.backward()
self.critic_optim.step()
pi, log_pi, _ = self.policy.sample(state_batch)
qf1_pi, qf2_pi = self.critic(state_batch, pi)
min_qf_pi = torch.min(qf1_pi, qf2_pi)
policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean() # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
if self.automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = self.log_alpha.exp()
alpha_tlogs = self.alpha.clone() # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.).to(self.device)
alpha_tlogs = torch.tensor(self.alpha) # For TensorboardX logs
if updates % self.target_update_interval == 0:
soft_update(self.critic_target, self.critic, self.tau)
return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs.item()
# Save model parameters
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optim.state_dict(), filename + "_critic_optimizer")
torch.save(self.policy.state_dict(), filename + "_actor")
torch.save(self.policy_optim.state_dict(), filename + "_actor_optimizer")
    def load(self, filename):
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optim.load_state_dict(torch.load(filename + "_critic_optimizer"))
        self.critic_target = copy.deepcopy(self.critic)
        self.policy.load_state_dict(torch.load(filename + "_actor"))
        self.policy_optim.load_state_dict(torch.load(filename + "_actor_optimizer"))
| 5,765 | 45.128 | 133 |
py
|
FORK
|
FORK-master/SAC-FORK/utils.py
|
import numpy as np
import torch
import math
class ReplayBuffer(object):
def __init__(self, state_dim, action_dim, max_size=int(1e6)):
self.max_size = max_size
self.ptr = 0
self.size = 0
self.state = np.zeros((max_size, state_dim))
self.action = np.zeros((max_size, action_dim))
self.next_state = np.zeros((max_size, state_dim))
self.reward = np.zeros((max_size, 1))
self.not_done = np.zeros((max_size, 1))
self.welford_state_n = 1
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def normalize_state(self, states, update=True):
"""
Use Welford's algorithm to normalize a state, and optionally update the statistics
for normalizing states using the new state, online.
"""
states = torch.Tensor(states)
states2 = states.data.clone()
ii = 0
for state in states:
if self.welford_state_n == 1:
self.welford_state_mean = torch.zeros(state.size(-1))
self.welford_state_mean_diff = torch.ones(state.size(-1))
if update:
if len(state.size()) == 1: # if we get a single state vector
state_old = self.welford_state_mean
self.welford_state_mean += (state - state_old) / self.welford_state_n
self.welford_state_mean_diff += (state - state_old) * (state - state_old)
self.welford_state_n += 1
else:
raise RuntimeError # this really should not happen
states2[ii] = (state - self.welford_state_mean) / np.sqrt(self.welford_state_mean_diff / self.welford_state_n)
ii += 1
return states2
def add(self, state, action, next_state, reward, done):
self.state[self.ptr] = state
self.action[self.ptr] = action
self.next_state[self.ptr] = next_state
self.reward[self.ptr] = reward
self.not_done[self.ptr] = 1. - done
self.ptr = (self.ptr + 1) % self.max_size
self.size = min(self.size + 1, self.max_size)
def sample(self, batch_size):
ind = np.random.randint(0,int(self.size), size=batch_size)
return (
torch.FloatTensor(self.state[ind]).to(self.device),
#torch.FloatTensor(self.normalize_state(self.state[ind])).to(self.device),
torch.FloatTensor(self.action[ind]).to(self.device),
torch.FloatTensor(self.next_state[ind]).to(self.device),
torch.FloatTensor(self.reward[ind]).to(self.device),
torch.FloatTensor(self.not_done[ind]).to(self.device)
)
def create_log_gaussian(mean, log_std, t):
    # log N(t; mean, exp(log_std)) summed over the last dimension:
    #   log N = -0.5 * ((t - mean) / sigma)^2 - log(sigma) - 0.5 * log(2 * pi)
    quadratic = -0.5 * ((t - mean) / (log_std.exp())).pow(2)
    l = mean.shape
    log_z = log_std
    z = l[-1] * math.log(2 * math.pi)
    log_p = quadratic.sum(dim=-1) - log_z.sum(dim=-1) - 0.5 * z
    return log_p
def logsumexp(inputs, dim=None, keepdim=False):
if dim is None:
inputs = inputs.view(-1)
dim = 0
s, _ = torch.max(inputs, dim=dim, keepdim=True)
outputs = s + (inputs - s).exp().sum(dim=dim, keepdim=True).log()
if not keepdim:
outputs = outputs.squeeze(dim)
return outputs
def soft_update(target, source, tau):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(target_param.data * (1.0 - tau) + param.data * tau)
def hard_update(target, source):
for target_param, param in zip(target.parameters(), source.parameters()):
target_param.data.copy_(param.data)
| 3,165 | 32.680851 | 113 |
py
|
FORK
|
FORK-master/SAC-FORK/model.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Normal
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
epsilon = 1e-6
# Initialize Policy weights
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class ValueNetwork(nn.Module):
def __init__(self, num_inputs, hidden_dim):
super(ValueNetwork, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = self.linear3(x)
return x
class QNetwork(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim):
super(QNetwork, self).__init__()
# Q1 architecture
self.linear1 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, 1)
# Q2 architecture
self.linear4 = nn.Linear(num_inputs + num_actions, hidden_dim)
self.linear5 = nn.Linear(hidden_dim, hidden_dim)
self.linear6 = nn.Linear(hidden_dim, 1)
self.apply(weights_init_)
def forward(self, state, action):
xu = torch.cat([state, action], 1)
x1 = F.relu(self.linear1(xu))
x1 = F.relu(self.linear2(x1))
x1 = self.linear3(x1)
x2 = F.relu(self.linear4(xu))
x2 = F.relu(self.linear5(x2))
x2 = self.linear6(x2)
return x1, x2
class GaussianPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
super(GaussianPolicy, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean_linear = nn.Linear(hidden_dim, num_actions)
self.log_std_linear = nn.Linear(hidden_dim, num_actions)
self.apply(weights_init_)
# action rescaling
if action_space is None:
self.action_scale = torch.tensor(1.)
self.action_bias = torch.tensor(0.)
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = self.mean_linear(x)
log_std = self.log_std_linear(x)
log_std = torch.clamp(log_std, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
return mean, log_std
def sample(self, state):
mean, log_std = self.forward(state)
std = log_std.exp()
normal = Normal(mean, std)
x_t = normal.rsample() # for reparameterization trick (mean + std * N(0,1))
y_t = torch.tanh(x_t)
action = y_t * self.action_scale + self.action_bias
log_prob = normal.log_prob(x_t)
# Enforcing Action Bound
log_prob -= torch.log(self.action_scale * (1 - y_t.pow(2)) + epsilon)
log_prob = log_prob.sum(1, keepdim=True)
mean = torch.tanh(mean) * self.action_scale + self.action_bias
return action, log_prob, mean
def to(self, device):
self.action_scale = self.action_scale.to(device)
self.action_bias = self.action_bias.to(device)
return super(GaussianPolicy, self).to(device)
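# Hedged usage sketch (editor's addition): drawing a squashed-Gaussian action.
# The 17-dim state and 6-dim action are made-up shapes for illustration.
#
#   policy = GaussianPolicy(num_inputs=17, num_actions=6, hidden_dim=256)
#   state = torch.randn(4, 17)                      # batch of 4 states
#   action, log_prob, mean = policy.sample(state)   # (4, 6), (4, 1), (4, 6)
#
# The subtraction in sample() is the tanh change-of-variables correction:
# log pi(a|s) = log N(x) - sum_i log(scale * (1 - tanh(x_i)^2)), with epsilon
# guarding against log(0) at the saturation points.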
class DeterministicPolicy(nn.Module):
def __init__(self, num_inputs, num_actions, hidden_dim, action_space=None):
super(DeterministicPolicy, self).__init__()
self.linear1 = nn.Linear(num_inputs, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.mean = nn.Linear(hidden_dim, num_actions)
self.noise = torch.Tensor(num_actions)
self.apply(weights_init_)
# action rescaling
if action_space is None:
self.action_scale = 1.
self.action_bias = 0.
else:
self.action_scale = torch.FloatTensor(
(action_space.high - action_space.low) / 2.)
self.action_bias = torch.FloatTensor(
(action_space.high + action_space.low) / 2.)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
mean = torch.tanh(self.mean(x)) * self.action_scale + self.action_bias
return mean
def sample(self, state):
mean = self.forward(state)
noise = self.noise.normal_(0., std=0.1)
noise = noise.clamp(-0.25, 0.25)
action = mean + noise
return action, torch.tensor(0.), mean
def to(self, device):
self.action_scale = self.action_scale.to(device)
self.action_bias = self.action_bias.to(device)
self.noise = self.noise.to(device)
return super(DeterministicPolicy, self).to(device)
class Sys_R(nn.Module):
def __init__(self,state_dim, action_dim, fc1_units, fc2_units):
super(Sys_R, self).__init__()
# Q1 architecture
self.l1 = nn.Linear(2 * state_dim + action_dim,fc1_units)
self.l2 = nn.Linear(fc1_units, fc2_units)
self.l3 = nn.Linear(fc2_units, 1)
self.apply(weights_init_)
def forward(self, state,next_state, action):
sa = torch.cat([state,next_state, action], 1)
q1 = F.relu(self.l1(sa))
q1 = F.relu(self.l2(q1))
q1 = self.l3(q1)
return q1
class SysModel(nn.Module):
def __init__(self, state_size, action_size, fc1_units, fc2_units):
super(SysModel, self).__init__()
self.l1 = nn.Linear(state_size + action_size, fc1_units)
self.l2 = nn.Linear(fc1_units, fc2_units)
self.l3 = nn.Linear(fc2_units, state_size)
self.apply(weights_init_)
def forward(self, state, action):
"""Build a system model to predict the next state at a given state."""
xa = torch.cat([state, action], 1)
x1 = F.relu(self.l1(xa))
x1 = F.relu(self.l2(x1))
x1 = self.l3(x1)
return x1
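# Hedged sketch (editor's addition) of how FORK chains these two modules to
# imagine future steps; `policy` is any actor exposing sample():
#
#   next_s_hat = sysmodel(state, action)        # predicted next state
#   r_hat = sysr(state, next_s_hat, action)     # predicted reward for (s, a, s')
#   a_next, _, _ = policy.sample(next_s_hat)    # policy action at imagined state
#
# SAC_FORK.update_parameters repeats this chain for a second imagined step
# before bootstrapping with the critic.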
| 6,142 | 31.162304 | 84 |
py
|
FORK
|
FORK-master/SAC-FORK/main_sac_fork.py
|
import argparse
import datetime
import gym
import numpy as np
import itertools
import os
import json
import pandas as pd
import torch
import SAC
import SAC_FORK
from replay_memory import ReplayMemory
def eval_policy(policy, env_name, eval_episodes=10):
eval_env = gym.make(env_name)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
action = policy.select_action(np.array(state),evaluate=True)
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
parser = argparse.ArgumentParser(description='PyTorch Soft Actor-Critic Args')
parser.add_argument('--env', default="HalfCheetah-v2",
help='Mujoco Gym environment (default: HalfCheetah-v2)')
parser.add_argument('--policy_type', default="Gaussian",
help='Policy Type: Gaussian | Deterministic (default: Gaussian)')
parser.add_argument('--policy', default="SAC",
                    help='Policy name: SAC or SAC_FORK (default: SAC)')
parser.add_argument('--eval', type=bool, default=True,
                    help='Evaluates the policy every eval_freq steps (default: True)')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.005, metavar='G',
                    help='target smoothing coefficient (τ) (default: 0.005)')
parser.add_argument('--lr', type=float, default=0.0003, metavar='G',
help='learning rate (default: 0.0003)')
parser.add_argument('--alpha', type=float, default=0.2, metavar='G',
help='Temperature parameter α determines the relative importance of the entropy\
term against the reward (default: 0.2)')
parser.add_argument('--automatic_entropy_tuning', type=bool, default=False, metavar='G',
                    help='Automatically adjust α (default: False)')
parser.add_argument('--seed', type=int, default=123456, metavar='N',
help='random seed (default: 123456)')
parser.add_argument('--batch_size', type=int, default=256, metavar='N',
help='batch size (default: 256)')
parser.add_argument('--num_steps', type=int, default=1000001, metavar='N',
help='maximum number of steps (default: 1000000)')
parser.add_argument('--hidden_size', type=int, default=256, metavar='N',
help='hidden size (default: 256)')
parser.add_argument('--sys_hidden_size', type=int, default=512, metavar='N',
help='sys_hidden_size (default: 512)')
parser.add_argument('--sysr_hidden_size', type=int, default=512, metavar='N',
help='sysr hidden size (default: 512)')
parser.add_argument('--updates_per_step', type=int, default=1, metavar='N',
help='model updates per simulator step (default: 1)')
parser.add_argument('--start_steps', type=int, default=10000, metavar='N',
help='Steps sampling random actions (default: 10000)')
parser.add_argument('--target_update_interval', type=int, default=1, metavar='N',
help='Value target update per no. of updates per step (default: 1)')
parser.add_argument('--replay_size', type=int, default=1000000, metavar='N',
                    help='size of replay buffer (default: 1000000)')
parser.add_argument("--eval_freq", default=5e3, type=int, help="evaluation frequency")
parser.add_argument("--training_mode", default="Online", help="Online Training or Offline Training")
parser.add_argument('--cuda', action="store_true",
help='run on CUDA (default: False)')
parser.add_argument("--sys_weight", default=0.6,type=float, help="weight for FORK")
parser.add_argument("--base_weight", default=0.6,type=float, help="base weight if using dynamic weight")
parser.add_argument("--sys_threshold", default=0.020,type=float, help="threshold for FORK")
parser.add_argument("--sys_dynamic_weight", default="False",help="whether use dynamic weight or not")
parser.add_argument("--max_reward", default=100, type=int,help="max reward for dynamic weight")
parser.add_argument("--save_model", default="False",help="Save training models")
parser.add_argument("--load_model", default="" ,help="Loding model or not")
args = parser.parse_args()
file_name = f"{args.policy}_{args.env}_{args.seed}_{args.training_mode}"
print("---------------------------------------")
print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed}, Weight: {args.sys_weight},Training_mode: {args.training_mode}, Dynamic_weight: {args.sys_dynamic_weight}")
print("---------------------------------------")
if args.sys_dynamic_weight == 'True':
file_name += f"_DW_{args.sys_dynamic_weight}"
if args.save_model == "True" and not os.path.exists("./models"):
os.makedirs("./models")
# Environment
env = gym.make(args.env)
env.seed(args.seed)
env.action_space.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# Agent
if args.policy == 'SAC':
agent = SAC.SAC(env.observation_space.shape[0], env.action_space, args)
elif args.policy == 'SAC_FORK':
agent = SAC_FORK.SAC_FORK(env.observation_space.shape[0], env.action_space, args)
memory = ReplayMemory(args.replay_size, args.seed)
# Training Loop
total_numsteps = 0
updates = 0
evaluations = [eval_policy(agent, args.env)]
agent.update_sys = 0
base_weight = args.base_weight
ep_reward_list = []
if args.policy == "SAC":
variant = dict(
algorithm='SAC',
env=args.env,
)
elif args.policy == "SAC_FORK":
variant = dict(
algorithm=args.policy,
env=args.env,
sys_weight=args.sys_weight,
sys_threshold=args.sys_threshold,
max_reward=args.max_reward,
sys_hidden_size=args.sys_hidden_size,
sysr_hidden_size=args.sysr_hidden_size,
)
if not os.path.exists(f"./data/{args.env}/{args.policy}/seed{args.seed}"):
os.makedirs(f'./data/{args.env}/{args.policy}/seed{args.seed}')
with open(f'./data/{args.env}/{args.policy}/seed{int(args.seed)}/variant.json', 'w') as outfile:
json.dump(variant,outfile)
for i_episode in itertools.count(1):
episode_reward = 0
episode_steps = 0
done = False
state = env.reset()
while not done:
if args.start_steps > total_numsteps:
action = env.action_space.sample() # Sample random action
else:
action = agent.select_action(state) # Sample action from policy
if len(memory) > args.batch_size:
# Number of updates per step in environment
for i in range(args.updates_per_step):
# Update parameters of all the networks
critic_1_loss, critic_2_loss, policy_loss, ent_loss, alpha = agent.update_parameters(memory, args.batch_size, updates)
updates += 1
next_state, reward, done, _ = env.step(action) # Step
episode_steps += 1
total_numsteps += 1
episode_reward += reward
if (total_numsteps + 1) % args.eval_freq == 0:
eval_reward = eval_policy(agent, args.env)
evaluations.append(eval_reward)
if args.save_model == "True":
agent.save(f"./models/{file_name}")
data = np.array(evaluations)
df = pd.DataFrame(data=data,columns=["Average Return"]).reset_index()
            df['Timesteps'] = df['index'] * int(args.eval_freq)  # one evaluation every eval_freq steps
df['env'] = args.env
df['algorithm_name'] = args.policy
df.to_csv(f'./data/{args.env}/{args.policy}/seed{args.seed}/progress.csv', index = False)
# Ignore the "done" signal if it comes from hitting the time horizon.
# (https://github.com/openai/spinningup/blob/master/spinup/algos/sac/sac.py)
mask = 1 if episode_steps == env._max_episode_steps else float(not done)
memory.push(state, action, reward, next_state, mask) # Append transition to memory
state = next_state
agent.obs_upper_bound = np.amax(state) if agent.obs_upper_bound < np.amax(state) else agent.obs_upper_bound
agent.obs_lower_bound = np.amin(state) if agent.obs_lower_bound > np.amin(state) else agent.obs_lower_bound
ep_reward_list.append(episode_reward)
if args.sys_dynamic_weight == "True":
agent.sys_weight = np.round((1 - np.clip(np.mean(ep_reward_list[-100:])/args.max_reward, 0, 1)),4) * base_weight
if total_numsteps > args.num_steps:
break
if args.policy == "SAC_FORK":
print(f"Total T: {total_numsteps+1} Episode Num: {i_episode+1} Episode T: {episode_steps} Reward: {episode_reward:.3f} Sysmodel_Loss: {agent.sysmodel_loss} Reward_loss: {agent.sysr_loss} Sys updated times: {agent.update_sys} Sys_weight: {agent.sys_weight}")
else:
print("Episode: {}, total numsteps: {}, episode steps: {}, reward: {}".format(i_episode, total_numsteps, episode_steps, round(episode_reward, 2)))
agent.update_sys = 0
env.close()
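# Example invocation (editor's addition; flags as defined above):
#   python main_sac_fork.py --env HalfCheetah-v2 --policy SAC_FORK \
#       --sys_weight 0.6 --sys_threshold 0.020 --seed 0 --cuda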
| 9,260 | 44.62069 | 265 |
py
|
FORK
|
FORK-master/SAC-FORK/SAC_FORK.py
|
import os
import torch
import torch.nn.functional as F
from torch.optim import Adam
from utils import soft_update, hard_update
from model import GaussianPolicy, QNetwork, DeterministicPolicy, Sys_R, SysModel
class SAC_FORK(object):
def __init__(self, num_inputs, action_space, args):
self.gamma = args.gamma
self.tau = args.tau
self.alpha = args.alpha
self.policy_type = args.policy_type
self.target_update_interval = args.target_update_interval
self.automatic_entropy_tuning = args.automatic_entropy_tuning
self.device = torch.device("cuda" if args.cuda else "cpu")
self.critic = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(device=self.device)
self.critic_optim = Adam(self.critic.parameters(), lr=args.lr)
self.critic_target = QNetwork(num_inputs, action_space.shape[0], args.hidden_size).to(self.device)
hard_update(self.critic_target, self.critic)
self.sysmodel = SysModel(num_inputs, action_space.shape[0], args.sys_hidden_size,args.sys_hidden_size).to(self.device)
self.sysmodel_optimizer = Adam(self.sysmodel.parameters(), lr=args.lr)
self.obs_upper_bound = 0 #state space upper bound
self.obs_lower_bound = 0 #state space lower bound
self.sysr = Sys_R(num_inputs, action_space.shape[0],args.sysr_hidden_size,args.sysr_hidden_size).to(self.device)
self.sysr_optimizer = torch.optim.Adam(self.sysr.parameters(), lr=args.lr)
self.sys_threshold = args.sys_threshold
self.sys_weight = args.sys_weight
self.sysmodel_loss = 0
self.sysr_loss = 0
if self.policy_type == "Gaussian":
# Target Entropy = −dim(A) (e.g. , -6 for HalfCheetah-v2) as given in the paper
if self.automatic_entropy_tuning is True:
self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(self.device)).item()
self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device)
self.alpha_optim = Adam([self.log_alpha], lr=args.lr)
self.policy = GaussianPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
else:
self.alpha = 0
self.automatic_entropy_tuning = False
self.policy = DeterministicPolicy(num_inputs, action_space.shape[0], args.hidden_size, action_space).to(self.device)
self.policy_optim = Adam(self.policy.parameters(), lr=args.lr)
def select_action(self, state, evaluate=False):
state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
if evaluate is False:
action, _, _ = self.policy.sample(state)
else:
_, _, action = self.policy.sample(state)
return action.detach().cpu().numpy()[0]
def update_parameters(self, memory, batch_size, updates):
# Sample a batch from memory
state_batch, action_batch, reward_batch, next_state_batch, mask_batch = memory.sample(batch_size=batch_size)
state_batch = torch.FloatTensor(state_batch).to(self.device)
next_state_batch = torch.FloatTensor(next_state_batch).to(self.device)
action_batch = torch.FloatTensor(action_batch).to(self.device)
reward_batch = torch.FloatTensor(reward_batch).to(self.device).unsqueeze(1)
mask_batch = torch.FloatTensor(mask_batch).to(self.device).unsqueeze(1)
with torch.no_grad():
next_state_action, next_state_log_pi, _ = self.policy.sample(next_state_batch)
qf1_next_target, qf2_next_target = self.critic_target(next_state_batch, next_state_action)
min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi
next_q_value = reward_batch + mask_batch * self.gamma * (min_qf_next_target)
qf1, qf2 = self.critic(state_batch, action_batch) # Two Q-functions to mitigate positive bias in the policy improvement step
qf1_loss = F.mse_loss(qf1, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf2_loss = F.mse_loss(qf2, next_q_value) # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
qf_loss = qf1_loss + qf2_loss
self.critic_optim.zero_grad()
qf_loss.backward()
self.critic_optim.step()
predict_next_state = self.sysmodel(state_batch, action_batch)
predict_next_state = predict_next_state.clamp(self.obs_lower_bound,self.obs_upper_bound)
sysmodel_loss = F.smooth_l1_loss(predict_next_state, next_state_batch.detach())
self.sysmodel_optimizer.zero_grad()
sysmodel_loss.backward()
self.sysmodel_optimizer.step()
self.sysmodel_loss = sysmodel_loss.item()
predict_reward = self.sysr(state_batch,next_state_batch,action_batch)
sysr_loss = F.mse_loss(predict_reward, reward_batch.detach())
self.sysr_optimizer.zero_grad()
sysr_loss.backward()
self.sysr_optimizer.step()
self.sysr_loss = sysr_loss.item()
s_flag = 1 if sysmodel_loss.item() < self.sys_threshold else 0
pi, log_pi, _ = self.policy.sample(state_batch)
qf1_pi, qf2_pi = self.critic(state_batch, pi)
min_qf_pi = torch.min(qf1_pi, qf2_pi)
policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean() # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
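        # FORK extension, sketched in the branch below (editor's comment; logic
        # unchanged): when the system model is accurate enough (s_flag), roll the
        # policy forward two imagined steps through sysmodel/sysr and add the
        # resulting lookahead term to the standard SAC policy loss.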
if s_flag == 1 and self.sys_weight != 0:
p_next_state = self.sysmodel(state_batch,pi)
p_next_state = p_next_state.clamp(self.obs_lower_bound,self.obs_upper_bound)
p_next_r = self.sysr(state_batch,p_next_state.detach(),pi)
pi2, log_pi2, _ = self.policy.sample(p_next_state.detach())
p_next_state2 = self.sysmodel(p_next_state,pi2)
p_next_state2 = p_next_state2.clamp(self.obs_lower_bound,self.obs_upper_bound)
p_next_r2 = self.sysr(p_next_state.detach(),p_next_state2.detach(),pi2)
pi3, log_pi3, _ = self.policy.sample(p_next_state2.detach())
qf3_pi, qf4_pi = self.critic(p_next_state2.detach(), pi3)
min_qf_pi2 = torch.min(qf3_pi, qf4_pi)
#sys_loss = (-p_next_r -self.gamma * p_next_r2 + self.gamma ** 2 * ((self.alpha * log_pi3) - min_qf_pi2)).mean()
sys_loss = (-p_next_r + self.alpha * log_pi - self.gamma * (p_next_r2 - self.alpha * log_pi2) + self.gamma ** 2 * ((self.alpha * log_pi3) - min_qf_pi2)).mean()
policy_loss += self.sys_weight * sys_loss
self.update_sys += 1
self.policy_optim.zero_grad()
policy_loss.backward()
self.policy_optim.step()
if self.automatic_entropy_tuning:
alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
self.alpha_optim.zero_grad()
alpha_loss.backward()
self.alpha_optim.step()
self.alpha = self.log_alpha.exp()
alpha_tlogs = self.alpha.clone() # For TensorboardX logs
else:
alpha_loss = torch.tensor(0.).to(self.device)
alpha_tlogs = torch.tensor(self.alpha) # For TensorboardX logs
if updates % self.target_update_interval == 0:
soft_update(self.critic_target, self.critic, self.tau)
return qf1_loss.item(), qf2_loss.item(), policy_loss.item(), alpha_loss.item(), alpha_tlogs.item()
# Save model parameters
def save(self, filename):
torch.save(self.critic.state_dict(), filename + "_critic")
torch.save(self.critic_optim.state_dict(), filename + "_critic_optimizer")
torch.save(self.policy.state_dict(), filename + "_actor")
torch.save(self.policy_optim.state_dict(), filename + "_actor_optimizer")
torch.save(self.sysmodel.state_dict(), filename + "_sysmodel")
torch.save(self.sysmodel_optimizer.state_dict(), filename + "_sysmodel_optimizer")
torch.save(self.sysr.state_dict(), filename + "_reward_model")
torch.save(self.sysr_optimizer.state_dict(), filename + "_reward_model_optimizer")
    def load(self, filename):
        # File suffixes match save() above; the target critic is re-synced
        # from the loaded critic instead of being deserialized separately.
        self.critic.load_state_dict(torch.load(filename + "_critic"))
        self.critic_optim.load_state_dict(torch.load(filename + "_critic_optimizer"))
        hard_update(self.critic_target, self.critic)
        self.policy.load_state_dict(torch.load(filename + "_actor"))
        self.policy_optim.load_state_dict(torch.load(filename + "_actor_optimizer"))
        self.sysmodel.load_state_dict(torch.load(filename + "_sysmodel"))
        self.sysmodel_optimizer.load_state_dict(torch.load(filename + "_sysmodel_optimizer"))
        self.sysr.load_state_dict(torch.load(filename + "_reward_model"))
        self.sysr_optimizer.load_state_dict(torch.load(filename + "_reward_model_optimizer"))
| 8,966 | 47.733696 | 172 |
py
|
SIVAND
|
SIVAND-master/MyDD.py
|
import DD
import helper as hp
###############################################################
g_model = None
g_original_method_name = None
g_predicted_method_name = None
g_all_data = []
g_cnt_dict = {}
g_cnt_pass = [0, 0, 0]
###############################################################
class MyDD(DD.DD):
def __init__(self):
DD.DD.__init__(self)
def _test(self, _deltas):
if not _deltas:
return self.PASS
try:
g_cnt_pass[0] = g_cnt_pass[0] + 1
_code = hp.deltas_to_code(_deltas)
hp.store_method(hp.g_simp_file, _code)
if hp.is_parsable(_code):
g_cnt_pass[1] = g_cnt_pass[1] + 1
_predict, _score, _loss = hp.prediction_with_M(g_model, hp.g_simp_file)
_time = hp.get_current_time()
print('time = {}, predict = {}, score = {}, loss = {}'.format(_time, _predict, _score, _loss))
if _predict == g_predicted_method_name:
g_cnt_pass[2] = g_cnt_pass[2] + 1
_data = hp.get_json_data(_time, _score, _loss, _code, _deltas[:], g_cnt_pass)
g_all_data.append("{}".format(_data))
return self.FAIL
except Exception:
pass
return self.PASS
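# Note (editor's addition): the outcome semantics are inverted relative to
# classic ddmin. FAIL here means "the model still makes the original
# prediction", so ddmin returns a 1-minimal program that preserves it.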
if __name__ == '__main__':
g_model = hp.load_model_M()
assert g_model is not None
# do for each file
file_list = hp.get_file_list()
for idx, file_path in enumerate(file_list):
print("\nStart [{}]: {}\n".format(idx + 1, file_path))
g_all_data.clear()
g_cnt_pass = [0, 0, 0]
try:
# get method_name and method_body
g_all_data.append("\npath = {}".format(file_path))
method_name, method_body = hp.load_method(file_path)
assert (len(method_name) > 0) and (len(method_body) > 0)
g_cnt_dict[method_name] = g_cnt_dict.get(method_name, 0) + 1
g_all_data.append("method_name = {}".format(method_name))
g_all_data.append("method_body = {}".format(method_body))
hp.store_method(hp.g_simp_file, method_body)
# check predicted method_name
g_original_method_name = method_name
predict, score, loss = hp.prediction_with_M(g_model, hp.g_simp_file)
g_predicted_method_name = predict
g_all_data.append("predict, score, loss = {}, {}, {}".format(predict, score, loss))
assert g_original_method_name == g_predicted_method_name
# create deltas by char/token
deltas = []
if hp.g_deltas_type == "token":
deltas = hp.get_token_deltas(method_body)
else:
deltas = hp.get_char_deltas(method_body)
# run ddmin to simplify program
try:
mydd = MyDD()
print("Simplifying prediction-preserving input...")
g_all_data.append("\nTrace of simplified code(s):")
c = mydd.ddmin(deltas) # Invoke DDMIN
print("The 1-minimal prediction-preserving input is", c)
print("Removing any element will make the prediction go away.")
program = hp.deltas_to_code(c)
g_all_data.append("\nMinimal simplified code:\n{}".format(program))
except Exception as e:
g_all_data.append("\nException:\n{}".format(str(e)))
# save all simplified traces
save_name = "L{}_{}_{}.txt".format(str(idx + 1), method_name, g_cnt_dict[method_name])
output_file = "dd_data/{}".format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone [{}]: {}\n".format(idx + 1, file_path))
except:
print("\nError [{}]: {}\n".format(idx + 1, file_path))
g_model.close_session()
| 3,896 | 37.584158 | 110 |
py
|
SIVAND
|
SIVAND-master/DD.py
|
#! /usr/bin/env python
# $Id: DD.py,v 1.2 2001/11/05 19:53:33 zeller Exp $
# Enhanced Delta Debugging class
# Copyright (c) 1999, 2000, 2001 Andreas Zeller.
# This module (written in Python) implements the base delta debugging
# algorithms and is at the core of all our experiments. This copy has
# been ported to Python 3, so it requires Python 3 rather than 1.6+.
#
# To plug this into your system, all you have to do is to create a
# subclass with a dedicated `test()' method. Basically, you would
# invoke the DD test case minimization algorithm (= the `ddmin()'
# method) with a list of characters; the `test()' method would combine
# them to a document and run the test. This should be easy to realize
# and give you some good starting results; the file includes a simple
# sample application.
#
# This file is in the public domain; feel free to copy, modify, use
# and distribute this software as you wish - with one exception.
# Passau University has filed a patent for the use of delta debugging
# on program states (A. Zeller: `Isolating cause-effect chains',
# Saarland University, 2001). The fact that this file is publicly
# available does not imply that I or anyone else grants you any rights
# related to this patent.
#
# The use of Delta Debugging to isolate failure-inducing code changes
# (A. Zeller: `Yesterday, my program worked', ESEC/FSE 1999) or to
# simplify failure-inducing input (R. Hildebrandt, A. Zeller:
# `Simplifying failure-inducing input', ISSTA 2000) is, as far as I
# know, not covered by any patent, nor will it ever be. If you use
# this software in any way, I'd appreciate if you include a citation
# such as `This software uses the delta debugging algorithm as
# described in (insert one of the papers above)'.
#
# All about Delta Debugging is found at the delta debugging web site,
#
# http://www.st.cs.uni-sb.de/dd/
#
# Happy debugging,
#
# Andreas Zeller
# Start with some helpers.
class OutcomeCache:
# This class holds test outcomes for configurations. This avoids
# running the same test twice.
# The outcome cache is implemented as a tree. Each node points
# to the outcome of the remaining list.
#
# Example: ([1, 2, 3], PASS), ([1, 2], FAIL), ([1, 4, 5], FAIL):
#
# (2, FAIL)--(3, PASS)
# /
# (1, None)
# \
# (4, None)--(5, FAIL)
def __init__(self):
self.tail = {} # Points to outcome of tail
self.result = None # Result so far
def add(self, c, result):
"""Add (C, RESULT) to the cache. C must be a list of scalars."""
cs = c[:]
cs.sort()
p = self
for start in range(len(c)):
if c[start] not in p.tail:
p.tail[c[start]] = OutcomeCache()
p = p.tail[c[start]]
p.result = result
def lookup(self, c):
"""Return RESULT if (C, RESULT) is in the cache; None, otherwise."""
p = self
for start in range(len(c)):
if c[start] not in p.tail:
return None
p = p.tail[c[start]]
return p.result
def lookup_superset(self, c, start=0):
"""Return RESULT if there is some (C', RESULT) in the cache with
C' being a superset of C or equal to C. Otherwise, return None."""
# FIXME: Make this non-recursive!
if start >= len(c):
if self.result:
return self.result
elif self.tail != {}:
# Select some superset
            superset = self.tail[list(self.tail.keys())[0]]  # dict views are not indexable in Python 3
return superset.lookup_superset(c, start + 1)
else:
return None
if c[start] in self.tail:
return self.tail[c[start]].lookup_superset(c, start + 1)
# Let K0 be the largest element in TAIL such that K0 <= C[START]
k0 = None
for k in self.tail.keys():
if (k0 is None or k > k0) and k <= c[start]:
k0 = k
if k0 is not None:
return self.tail[k0].lookup_superset(c, start)
return None
def lookup_subset(self, c):
"""Return RESULT if there is some (C', RESULT) in the cache with
C' being a subset of C or equal to C. Otherwise, return None."""
p = self
for start in range(len(c)):
if c[start] in p.tail:
p = p.tail[c[start]]
return p.result
# Test the outcome cache
def oc_test():
oc = OutcomeCache()
assert oc.lookup([1, 2, 3]) is None
oc.add([1, 2, 3], 4)
assert oc.lookup([1, 2, 3]) == 4
assert oc.lookup([1, 2, 3, 4]) is None
assert oc.lookup([5, 6, 7]) is None
oc.add([5, 6, 7], 8)
assert oc.lookup([5, 6, 7]) == 8
assert oc.lookup([]) is None
oc.add([], 0)
assert oc.lookup([]) == 0
assert oc.lookup([1, 2]) is None
oc.add([1, 2], 3)
assert oc.lookup([1, 2]) == 3
assert oc.lookup([1, 2, 3]) == 4
assert oc.lookup_superset([1]) == 3 or oc.lookup_superset([1]) == 4
assert oc.lookup_superset([1, 2]) == 3 or oc.lookup_superset([1, 2]) == 4
assert oc.lookup_superset([5]) == 8
assert oc.lookup_superset([5, 6]) == 8
assert oc.lookup_superset([6, 7]) == 8
assert oc.lookup_superset([7]) == 8
assert oc.lookup_superset([]) is not None
assert oc.lookup_superset([9]) is None
assert oc.lookup_superset([7, 9]) is None
assert oc.lookup_superset([-5, 1]) is None
assert oc.lookup_superset([1, 2, 3, 9]) is None
assert oc.lookup_superset([4, 5, 6, 7]) is None
assert oc.lookup_subset([]) == 0
assert oc.lookup_subset([1, 2, 3]) == 4
assert oc.lookup_subset([1, 2, 3, 4]) == 4
assert oc.lookup_subset([1, 3]) is None
assert oc.lookup_subset([1, 2]) == 3
assert oc.lookup_subset([-5, 1]) is None
assert oc.lookup_subset([-5, 1, 2]) == 3
assert oc.lookup_subset([-5]) == 0
# Main Delta Debugging algorithm.
class DD:
# Delta debugging base class. To use this class for a particular
# setting, create a subclass with an overloaded `test()' method.
#
# Main entry points are:
# - `ddmin()' which computes a minimal failure-inducing configuration, and
# - `dd()' which computes a minimal failure-inducing difference.
#
# See also the usage sample at the end of this file.
#
# For further fine-tuning, you can implement an own `resolve()'
# method (tries to add or remove configuration elements in case of
# inconsistencies), or implement an own `split()' method, which
# allows you to split configurations according to your own
# criteria.
#
    # The class includes other previous delta debugging algorithms,
# which are obsolete now; they are only included for comparison
# purposes.
# Test outcomes.
PASS = "PASS"
FAIL = "FAIL"
UNRESOLVED = "UNRESOLVED"
# Resolving directions.
ADD = "ADD" # Add deltas to resolve
REMOVE = "REMOVE" # Remove deltas to resolve
# Debugging output (set to 1 to enable)
debug_test = 0
debug_dd = 0
debug_split = 0
debug_resolve = 0
def __init__(self):
self.__resolving = 0
self.__last_reported_length = 0
self.monotony = 0
self.outcome_cache = OutcomeCache()
self.cache_outcomes = 1
self.minimize = 1
self.maximize = 1
self.assume_axioms_hold = 1
# Helpers
def __listminus(self, c1, c2):
"""Return a list of all elements of C1 that are not in C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
c = []
for delta in c1:
if delta not in s2:
c.append(delta)
return c
def __listintersect(self, c1, c2):
"""Return the common elements of C1 and C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
c = []
for delta in c1:
if delta in s2:
c.append(delta)
return c
def __listunion(self, c1, c2):
"""Return the union of C1 and C2."""
s1 = {}
for delta in c1:
s1[delta] = 1
c = c1[:]
for delta in c2:
if delta not in s1:
c.append(delta)
return c
def __listsubseteq(self, c1, c2):
"""Return 1 if C1 is a subset or equal to C2."""
s2 = {}
for delta in c2:
s2[delta] = 1
for delta in c1:
if delta not in s2:
return 0
return 1
# Output
def coerce(self, c):
"""Return the configuration C as a compact string"""
# Default: use printable representation
return repr(c)
def pretty(self, c):
"""Like coerce(), but sort beforehand"""
sorted_c = c[:]
sorted_c.sort()
return self.coerce(sorted_c)
# Testing
def test(self, c):
"""Test the configuration C. Return PASS, FAIL, or UNRESOLVED"""
c.sort()
# If we had this test before, return its result
if self.cache_outcomes:
cached_result = self.outcome_cache.lookup(c)
if cached_result is not None:
return cached_result
if self.monotony:
# Check whether we had a passing superset of this test before
cached_result = self.outcome_cache.lookup_superset(c)
if cached_result == self.PASS:
return self.PASS
cached_result = self.outcome_cache.lookup_subset(c)
if cached_result == self.FAIL:
return self.FAIL
if self.debug_test:
print()
print("test(" + self.coerce(c) + ")...")
outcome = self._test(c)
if self.debug_test:
print("test(" + self.coerce(c) + ") = " + repr(outcome))
if self.cache_outcomes:
self.outcome_cache.add(c, outcome)
return outcome
def _test(self, c):
"""Stub to overload in subclasses"""
return self.UNRESOLVED # Placeholder
# Splitting
def split(self, c, n):
"""Split C into [C_1, C_2, ..., C_n]."""
if self.debug_split:
print("split(" + self.coerce(c) + ", " + repr(n) + ")...")
outcome = self._split(c, n)
if self.debug_split:
print("split(" + self.coerce(c) + ", " + repr(n) + ") = " + repr(outcome))
return outcome
def _split(self, c, n):
"""Stub to overload in subclasses"""
subsets = []
start = 0
for i in range(n):
subset = c[start:start + (len(c) - start) // (n - i)]
subsets.append(subset)
start = start + len(subset)
return subsets
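    # Example (editor's addition): the default split yields n nearly equal,
    # contiguous chunks, e.g.
    #   _split([1, 2, 3, 4, 5, 6], 3) -> [[1, 2], [3, 4], [5, 6]]
    #   _split([1, 2, 3, 4, 5], 2)    -> [[1, 2], [3, 4, 5]]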
# Resolving
def resolve(self, csub, c, direction):
"""If direction == ADD, resolve inconsistency by adding deltas
to CSUB. Otherwise, resolve by removing deltas from CSUB."""
if self.debug_resolve:
print("resolve(" + repr(csub) + ", " + self.coerce(c) + ", " + repr(direction) + ")...")
outcome = self._resolve(csub, c, direction)
if self.debug_resolve:
print("resolve(" + repr(csub) + ", " + self.coerce(c) + ", " + repr(direction) + ") = " + repr(outcome))
return outcome
def _resolve(self, csub, c, direction):
"""Stub to overload in subclasses."""
# By default, no way to resolve
return None
# Test with fixes
def test_and_resolve(self, csub, r, c, direction):
"""Repeat testing CSUB + R while unresolved."""
initial_csub = csub[:]
c2 = self.__listunion(r, c)
csubr = self.__listunion(csub, r)
t = self.test(csubr)
        # _resolve_type lets subclasses combine several resolving mechanisms
        # that can reverse each other; it can (but need not) be used there.
        self._resolve_type = 0
while t == self.UNRESOLVED:
self.__resolving = 1
csubr = self.resolve(csubr, c, direction)
if csubr is None:
# Nothing left to resolve
break
if len(csubr) >= len(c2):
# Added everything: csub == c2. ("Upper" Baseline)
# This has already been tested.
csubr = None
break
if len(csubr) <= len(r):
# Removed everything: csub == r. (Baseline)
# This has already been tested.
csubr = None
break
t = self.test(csubr)
self.__resolving = 0
if csubr is None:
return self.UNRESOLVED, initial_csub
# assert t == self.PASS or t == self.FAIL
csub = self.__listminus(csubr, r)
return t, csub
# Inquiries
def resolving(self):
"""Return 1 while resolving."""
return self.__resolving
# Logging
def report_progress(self, c, title):
if len(c) != self.__last_reported_length:
print()
print(title + ": " + repr(len(c)) + " deltas left:", self.coerce(c))
self.__last_reported_length = len(c)
# Delta Debugging (old ESEC/FSE version)
def old_dd(self, c, r=[], n=2):
"""Return the failure-inducing subset of C"""
assert self.test([]) == self.PASS
assert self.test(c) == self.FAIL
if self.debug_dd:
print("dd(" + self.pretty(c) + ", " + repr(r) + ", " + repr(n) + ")...")
outcome = self._old_dd(c, r, n)
if self.debug_dd:
print("dd(" + self.pretty(c) + ", " + repr(r) + ", " + repr(n) + ") = " + repr(outcome))
return outcome
def _old_dd(self, c, r, n):
"""Stub to overload in subclasses"""
if r == []:
assert self.test([]) == self.PASS
assert self.test(c) == self.FAIL
else:
assert self.test(r) != self.FAIL
assert self.test(c + r) != self.PASS
assert self.__listintersect(c, r) == []
if len(c) == 1:
# Nothing to split
return c
run = 1
next_c = c[:]
next_r = r[:]
# We replace the tail recursion from the paper by a loop
while 1:
self.report_progress(c, "dd")
cs = self.split(c, n)
print()
print("dd (run #" + repr(run) + "): trying", end=' ')
for i in range(n):
if i > 0:
print("+", end=' ')
print(len(cs[i]), end=' ')
print()
# Check subsets
ts = []
for i in range(n):
if self.debug_dd:
print("dd: trying cs[" + repr(i) + "] =", self.pretty(cs[i]))
t, cs[i] = self.test_and_resolve(cs[i], r, c, self.REMOVE)
ts.append(t)
if t == self.FAIL:
# Found
if self.debug_dd:
print("dd: found", len(cs[i]), "deltas:", end=' ')
print(self.pretty(cs[i]))
return self.dd(cs[i], r)
# Check complements
cbars = []
tbars = []
for i in range(n):
cbar = self.__listminus(c, cs[i] + r)
tbar, cbar = self.test_and_resolve(cbar, r, c, self.ADD)
doubled = self.__listintersect(cbar, cs[i])
if doubled != []:
cs[i] = self.__listminus(cs[i], doubled)
cbars.append(cbar)
tbars.append(tbar)
if ts[i] == self.PASS and tbars[i] == self.PASS:
# Interference
if self.debug_dd:
print("dd: interference of", self.pretty(cs[i]), end=' ')
print("and", self.pretty(cbars[i]))
d = self.dd(cs[i][:], cbars[i] + r)
dbar = self.dd(cbars[i][:], cs[i] + r)
return d + dbar
if ts[i] == self.UNRESOLVED and tbars[i] == self.PASS:
# Preference
if self.debug_dd:
print("dd: preferring", len(cs[i]), "deltas:", end=' ')
print(self.pretty(cs[i]))
return self.dd(cs[i][:], cbars[i] + r)
if ts[i] == self.PASS or tbars[i] == self.FAIL:
if self.debug_dd:
excluded = self.__listminus(next_c, cbars[i])
print("dd: excluding", len(excluded), "deltas:", end=' ')
print(self.pretty(excluded))
if ts[i] == self.PASS:
next_r = self.__listunion(next_r, cs[i])
next_c = self.__listintersect(next_c, cbars[i])
self.report_progress(next_c, "dd")
next_n = min(len(next_c), n * 2)
if next_n == n and next_c[:] == c[:] and next_r[:] == r[:]:
# Nothing left
if self.debug_dd:
print("dd: nothing left")
return next_c
# Try again
if self.debug_dd:
print("dd: try again")
c = next_c
r = next_r
n = next_n
run = run + 1
def test_mix(self, csub, c, direction):
if self.minimize:
(t, csub) = self.test_and_resolve(csub, [], c, direction)
if t == self.FAIL:
return (t, csub)
if self.maximize:
csubbar = self.__listminus(self.CC, csub)
cbar = self.__listminus(self.CC, c)
if direction == self.ADD:
directionbar = self.REMOVE
else:
directionbar = self.ADD
(tbar, csubbar) = self.test_and_resolve(csubbar, [], cbar, directionbar)
csub = self.__listminus(self.CC, csubbar)
if tbar == self.PASS:
t = self.FAIL
elif tbar == self.FAIL:
t = self.PASS
else:
t = self.UNRESOLVED
return (t, csub)
# Delta Debugging (new ISSTA version)
def ddgen(self, c, minimize, maximize):
"""Return a 1-minimal failing subset of C"""
self.minimize = minimize
self.maximize = maximize
n = 2
self.CC = c
if self.debug_dd:
print("dd(" + self.pretty(c) + ", " + repr(n) + ")...")
outcome = self._dd(c, n)
if self.debug_dd:
print("dd(" + self.pretty(c) + ", " + repr(n) + ") = " + repr(outcome))
return outcome
def _dd(self, c, n):
"""Stub to overload in subclasses"""
assert self.test([]) == self.PASS
run = 1
cbar_offset = 0
# We replace the tail recursion from the paper by a loop
while 1:
tc = self.test(c)
assert tc == self.FAIL or tc == self.UNRESOLVED
if n > len(c):
# No further minimizing
print("dd: done")
return c
self.report_progress(c, "dd")
cs = self.split(c, n)
print()
print("dd (run #" + repr(run) + "): trying", end=' ')
for i in range(n):
if i > 0:
print("+", end=' ')
print(len(cs[i]), end=' ')
print()
c_failed = 0
cbar_failed = 0
next_c = c[:]
next_n = n
# Check subsets
for i in range(n):
if self.debug_dd:
print("dd: trying", self.pretty(cs[i]))
(t, cs[i]) = self.test_mix(cs[i], c, self.REMOVE)
if t == self.FAIL:
# Found
if self.debug_dd:
print("dd: found", len(cs[i]), "deltas:", end=' ')
print(self.pretty(cs[i]))
c_failed = 1
next_c = cs[i]
next_n = 2
cbar_offset = 0
self.report_progress(next_c, "dd")
break
if not c_failed:
# Check complements
cbars = n * [self.UNRESOLVED]
# print("cbar_offset =", cbar_offset)
for j in range(n):
i = (j + int(cbar_offset)) % n
cbars[i] = self.__listminus(c, cs[i])
t, cbars[i] = self.test_mix(cbars[i], c, self.ADD)
doubled = self.__listintersect(cbars[i], cs[i])
if doubled != []:
cs[i] = self.__listminus(cs[i], doubled)
if t == self.FAIL:
if self.debug_dd:
print("dd: reduced to", len(cbars[i]), end=' ')
print("deltas:", end=' ')
print(self.pretty(cbars[i]))
cbar_failed = 1
next_c = self.__listintersect(next_c, cbars[i])
next_n = next_n - 1
self.report_progress(next_c, "dd")
# In next run, start removing the following subset
cbar_offset = i
break
if not c_failed and not cbar_failed:
if n >= len(c):
# No further minimizing
print("dd: done")
return c
next_n = min(len(c), n * 2)
print("dd: increase granularity to", next_n)
                cbar_offset = (cbar_offset * next_n) // n  # keep the offset integral
c = next_c
n = next_n
run = run + 1
def ddmin(self, c):
return self.ddgen(c, 1, 0)
def ddmax(self, c):
return self.ddgen(c, 0, 1)
def ddmix(self, c):
return self.ddgen(c, 1, 1)
# General delta debugging (new TSE version)
def dddiff(self, c):
n = 2
if self.debug_dd:
print("dddiff(" + self.pretty(c) + ", " + repr(n) + ")...")
outcome = self._dddiff([], c, n)
if self.debug_dd:
print("dddiff(" + self.pretty(c) + ", " + repr(n) + ") = " + repr(outcome))
return outcome
def _dddiff(self, c1, c2, n):
run = 1
cbar_offset = 0
# We replace the tail recursion from the paper by a loop
while 1:
if self.debug_dd:
print("dd: c1 =", self.pretty(c1))
print("dd: c2 =", self.pretty(c2))
if self.assume_axioms_hold:
t1 = self.PASS
t2 = self.FAIL
else:
t1 = self.test(c1)
t2 = self.test(c2)
assert t1 == self.PASS
assert t2 == self.FAIL
assert self.__listsubseteq(c1, c2)
c = self.__listminus(c2, c1)
if self.debug_dd:
print("dd: c2 - c1 =", self.pretty(c))
if n > len(c):
# No further minimizing
print("dd: done")
return (c, c1, c2)
self.report_progress(c, "dd")
cs = self.split(c, n)
print()
print("dd (run #" + repr(run) + "): trying", end=' ')
for i in range(n):
if i > 0:
print("+", end=' ')
print(len(cs[i]), end=' ')
print()
progress = 0
next_c1 = c1[:]
next_c2 = c2[:]
next_n = n
# Check subsets
for j in range(n):
i = (j + int(cbar_offset)) % n
if self.debug_dd:
print("dd: trying", self.pretty(cs[i]))
(t, csub) = self.test_and_resolve(cs[i], c1, c, self.REMOVE)
csub = self.__listunion(c1, csub)
if t == self.FAIL and t1 == self.PASS:
# Found
progress = 1
next_c2 = csub
next_n = 2
cbar_offset = 0
if self.debug_dd:
print("dd: reduce c2 to", len(next_c2), "deltas:", end=' ')
print(self.pretty(next_c2))
break
if t == self.PASS and t2 == self.FAIL:
# Reduce to complement
progress = 1
next_c1 = csub
next_n = max(next_n - 1, 2)
cbar_offset = i
if self.debug_dd:
print("dd: increase c1 to", len(next_c1), "deltas:", end=' ')
print(self.pretty(next_c1))
break
csub = self.__listminus(c, cs[i])
(t, csub) = self.test_and_resolve(csub, c1, c, self.ADD)
csub = self.__listunion(c1, csub)
if t == self.PASS and t2 == self.FAIL:
# Found
progress = 1
next_c1 = csub
next_n = 2
cbar_offset = 0
if self.debug_dd:
print("dd: increase c1 to", len(next_c1), "deltas:", end=' ')
print(self.pretty(next_c1))
break
if t == self.FAIL and t1 == self.PASS:
# Increase
progress = 1
next_c2 = csub
next_n = max(next_n - 1, 2)
cbar_offset = i
if self.debug_dd:
print("dd: reduce c2 to", len(next_c2), "deltas:", end=' ')
print(self.pretty(next_c2))
break
if progress:
self.report_progress(self.__listminus(next_c2, next_c1), "dd")
else:
if n >= len(c):
# No further minimizing
print("dd: done")
return (c, c1, c2)
next_n = min(len(c), n * 2)
print("dd: increase granularity to", next_n)
                cbar_offset = (cbar_offset * next_n) // n  # keep the offset integral
c1 = next_c1
c2 = next_c2
n = next_n
run = run + 1
def dd(self, c):
return self.dddiff(c) # Backwards compatibility
if __name__ == '__main__':
# Test the outcome cache
oc_test()
# Define our own DD class, with its own test method
class MyDD(DD):
def _test_a(self, c):
"""Test the configuration C. Return PASS, FAIL, or UNRESOLVED."""
# Just a sample
# if 2 in c and not 3 in c:
# return self.UNRESOLVED
# if 3 in c and not 7 in c:
# return self.UNRESOLVED
if 7 in c and not 2 in c:
return self.UNRESOLVED
if 5 in c and 8 in c:
return self.FAIL
return self.PASS
def _test_b(self, c):
if c == []:
return self.PASS
if 1 in c and 2 in c and 3 in c and 4 in c and \
5 in c and 6 in c and 7 in c and 8 in c:
return self.FAIL
return self.UNRESOLVED
def _test_c(self, c):
if 1 in c and 2 in c and 3 in c and 4 in c and \
6 in c and 8 in c:
if 5 in c and 7 in c:
return self.UNRESOLVED
else:
return self.FAIL
if 1 in c or 2 in c or 3 in c or 4 in c or \
6 in c or 8 in c:
return self.UNRESOLVED
return self.PASS
def __init__(self):
self._test = self._test_c
DD.__init__(self)
print("WYNOT - a tool for delta debugging.")
mydd = MyDD()
# mydd.debug_test = 1 # Enable debugging output
# mydd.debug_dd = 1 # Enable debugging output
# mydd.debug_split = 1 # Enable debugging output
# mydd.debug_resolve = 1 # Enable debugging output
# mydd.cache_outcomes = 0
# mydd.monotony = 0
print("Minimizing failure-inducing input...")
c = mydd.ddmin([1, 2, 3, 4, 5, 6, 7, 8]) # Invoke DDMIN
print("The 1-minimal failure-inducing input is", c)
print("Removing any element will make the failure go away.")
print()
print("Computing the failure-inducing difference...")
(c, c1, c2) = mydd.dd([1, 2, 3, 4, 5, 6, 7, 8]) # Invoke DD
print("The 1-minimal failure-inducing difference is", c)
print(c1, "passes,", c2, "fails")
# Local Variables:
# mode: python
# End:
| 28,834 | 30.342391 | 116 |
py
|
SIVAND
|
SIVAND-master/helper.py
|
import pandas as pd
import subprocess
import javalang
from datetime import datetime
import json
###############################################################
g_deltas_types = ["token", "char"]
g_simp_file = "data/tmp/sm_test.java"
JAR_LOAD_JAVA_METHOD = "others/LoadJavaMethod/target/jar/LoadJavaMethod.jar"
# TODO - update file_path and delta_type
g_test_file = "data/selected_file/mn_c2x/c2x_jl_test_correct_prediction_samefile.txt"
g_deltas_type = g_deltas_types[0]
###############################################################
def get_file_list():
file_list = []
try:
df = pd.read_csv(g_test_file)
file_list = df["path"].tolist()[:1000]
except Exception:
pass
return file_list
def get_current_time():
return str(datetime.now())
def get_char_deltas(program):
data = list(program) # ['a',...,'z']
    deltas = list(zip(range(len(data)), data))  # [(0, 'a'), ..., (n, 'z')]
return deltas
def get_token_deltas(program):
token, tokens = "", []
for c in program:
if not c.isalpha():
tokens.append(token)
tokens.append(c)
token = ""
else:
token = token + c
tokens.append(token)
tokens = [token for token in tokens if len(token) != 0]
deltas = list(zip(range(len(tokens)), tokens))
return deltas
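# Example (editor's addition): each delta pairs an element with its original
# index, so ddmin can drop elements while keeping the rest in order, e.g.
#   get_token_deltas("int x;") -> [(0, 'int'), (1, ' '), (2, 'x'), (3, ';')]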
def deltas_to_code(d):
return "".join([c[1] for c in d])
def is_parsable(code):
try:
# Example: check whether <code> (JAVA program) is parsable
tree = javalang.parse.parse("class Test { " + code + " }")
assert tree is not None
except Exception:
return False
return True
def get_json_data(time, score, loss, code, tokens=None, n_pass=None):
score, loss = str(round(float(score), 4)), str(round(float(loss), 4))
data = {'time': time, 'score': score, 'loss': loss, 'code': code}
if tokens:
data['n_tokens'] = len(tokens)
if n_pass:
data['n_pass'] = n_pass
j_data = json.dumps(data)
return j_data
###############################################################
def load_model_M(model_path=""):
model = None
# TODO: load target model from <model_path>
# Example: check <models/dd-M/dd_M.py>
return model
def prediction_with_M(model, file_path):
pred, score, loss = None, None, None
# TODO: preprocess <file_path> and evaluate with <model>
# and get predicted name, score, and loss
# Example: check <models/dd-M/sm_helper.py>
return pred, score, loss
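# Hedged sketch (editor's addition) of how these stubs might be filled in for a
# code2vec-style model; `code2vec.load_from`, `model.predict`, and `preprocess`
# are hypothetical placeholders, not a real API:
#
#   def load_model_M(model_path=""):
#       model = code2vec.load_from(model_path)    # hypothetical loader
#       return model
#
#   def prediction_with_M(model, file_path):
#       pred, score, loss = model.predict(preprocess(file_path))  # hypothetical
#       return pred, score, loss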
###############################################################
def load_method(file_path):
try:
# Example: extract name and body from method of JAVA program.
cmd = ['java', '-jar', JAR_LOAD_JAVA_METHOD, file_path]
contents = subprocess.check_output(cmd, encoding="utf-8", close_fds=True)
contents = contents.split()
method_name = contents[0]
method_body = " ".join(contents[1:])
return method_name, method_body
except Exception:
return "", ""
def store_method(sm_file, method_body):
with open(sm_file, "w") as f:
f.write(method_body + "\n")
def save_simplified_code(all_methods, output_file):
open(output_file, 'w').close()
with open(output_file, 'a') as f:
for jCode in all_methods:
print(jCode)
f.write(jCode + "\n")
f.write("\n")
| 3,405 | 26.031746 | 85 |
py
|
SIVAND
|
SIVAND-master/others/load_java_method.py
|
import subprocess
from pathlib import Path
JAR_LOAD_JAVA_METHOD = "LoadJavaMethod/target/jar/LoadJavaMethod.jar"
def load_method(file_path):
cmd = ['java', '-jar', JAR_LOAD_JAVA_METHOD, file_path]
contents = subprocess.check_output(cmd, encoding="utf-8", close_fds=True)
contents = contents.split()
name, body = contents[0], " ".join(contents[1:])
return name, body
if __name__ == '__main__':
input_path = 'sample_input.java'
program = Path(input_path).read_text()
print("program:\n{}".format(program))
method_name, single_line = load_method(input_path)
print("method_name = {}".format(method_name))
print("single_line = {}".format(single_line))
| 695 | 30.636364 | 77 |
py
|
SIVAND
|
SIVAND-master/others/get_ast_nodes.py
|
import subprocess
from pathlib import Path
JAR_JAVA_AST_DATA = "GetAstNodes/target/jar/GetAstNodes.jar"
INNER_DELIMITER = " ___INNER___ "
OUTER_DELIMITER = " ___OUTER___ "
def get_ast_data(str_code):
cmd = ['java', '-jar', JAR_JAVA_AST_DATA, str_code]
content = subprocess.check_output(cmd, encoding="utf-8", close_fds=True)
[all_terminals, all_classes] = content.strip().split(OUTER_DELIMITER)
all_terminals = all_terminals.split(INNER_DELIMITER)
all_classes = all_classes.split(INNER_DELIMITER)
return all_terminals, all_classes
if __name__ == '__main__':
program = Path('sample_input.java').read_text()
print("program:\n{}".format(program))
ast_terminals, ast_classes = get_ast_data(program)
print("ast_terminals = {}".format(ast_terminals))
print("ast_classes = {}".format(ast_classes))
ast_nodes = list(set(ast_terminals + ast_classes))
print("all_nodes = {}".format(ast_nodes))
| 941 | 35.230769 | 76 |
py
|
SIVAND
|
SIVAND-master/others/get_tokens_java.py
|
import javalang
from pathlib import Path
def get_tokens(str_code):
tokens = list(javalang.tokenizer.tokenize(str_code))
tokens = [token.value for token in tokens]
return tokens
if __name__ == '__main__':
program = Path('sample_input.java').read_text()
print("program:\n{}".format(program))
print("tokens = {}".format(get_tokens(program)))
| 367 | 23.533333 | 56 |
py
|
SIVAND
|
SIVAND-master/models/dd-great/sm_helper.py
|
import os
import json
from datetime import datetime
#######################################
root_path = "/scratch/rabin/deployment/root-simplify/sm-great/"
vocabulary_path = root_path + "vocab.txt"
config_path = root_path + "config.yml"
vm_json_root_path = "/scratch/rabin/deployment/root-simplify/data_selection/vm_rnn_transformer/"
vm_json_paths = {
"rnn_buggy": vm_json_root_path + "buggy_correct_prediction_rnn_samefile.txt",
"rnn_nonbuggy": vm_json_root_path + "nonbuggy_correct_prediction_rnn_samefile.txt",
"transformer_buggy": vm_json_root_path + "buggy_correct_prediction_transformer_samefile.txt",
"transformer_nonbuggy": vm_json_root_path + "nonbuggy_correct_prediction_transformer_samefile.txt"
}
vm_json_path = None
vm_model_paths = {
"rnn": "/scratch/rabin/models/great/vm/rnn/checkpoints/",
"transformer": "/scratch/rabin/models/great/vm/transformer/checkpoints/"
}
vm_model_path = None
g_buggy_types = ["buggy", "nonbuggy"]
data_path = root_path + "sm_data/tmp/great/"
eval_tmp = data_path + "eval/tmp.txt"
dd_file = root_path + "sm_data/dd_data/{}"
# <modify here>
g_buggy_type = g_buggy_types[0]
g_dd_count = 1000
#######################################
sample_keys = ["has_bug", "source_tokens", "error_location", "repair_targets", "repair_candidates"]
marker_keys = ["error_location", "repair_targets", "repair_candidates"]
g_sample = {}
g_marker = {}
#######################################
def set_model_json_from_configuration(m_type):
global vm_json_path, vm_model_path
vm_json_path = vm_json_paths["{}_{}".format(m_type, g_buggy_type)]
vm_model_path = vm_model_paths[m_type]
def get_current_time():
return str(datetime.now())
def get_eval_txt_files():
txt_files = []
try:
vm_data_path = "/scratch/rabin/data/vm/great/"
eval_path = vm_data_path + "eval/"
txt_files = [eval_path + f for f in os.listdir(eval_path) if '.txt' in f]
except:
pass
return txt_files
def get_eval_tmp_json():
try:
with open(eval_tmp, 'r') as f:
for l in f:
if l.strip():
return json.loads(l)
except:
return ''
def set_eval_tmp_json(sample):
try:
with open(eval_tmp, 'w') as f:
json.dump(sample, f)
except:
pass
def check_eval_tmp(line, sample=None):
try:
if not sample:
sample = json.loads(line)
if g_buggy_type == g_buggy_types[0]: # buggy
assert len(sample["repair_targets"]) > 0
else:
assert len(sample["repair_targets"]) == 0
sample["repair_candidates"] = [t for t in sample["repair_candidates"] if isinstance(t, int)]
js_org = sample.copy()
set_eval_tmp_json(js_org.copy())
js_dup = get_eval_tmp_json()
if len(js_dup) > 0 and js_dup == js_org:
global g_sample, g_marker
g_sample, g_marker = get_marker_tokens(js_dup.copy())
return g_sample
except:
pass
return None
def get_marker_tokens(sample):
sample["marker_tokens"] = list(sample["source_tokens"])
sample["error_location"] = [sample["error_location"]]
marker_tokens = {k: [] for k in marker_keys}
for k in marker_keys:
for i, p in enumerate(sample[k]):
t = "<{}_{}>".format(k, i) + sample["marker_tokens"][p] + "<\\{}_{}>".format(k, i)
marker_tokens[k].append(t)
sample["marker_tokens"][p] = t
return sample, marker_tokens
def update_marker_positions(deltas):
sample = g_sample.copy()
sample["source_tokens"] = list(deltas)
sample["marker_tokens"] = list(deltas)
for k in marker_keys[::-1]:
sample[k] = []
for i, t in enumerate(g_marker[k]):
if t in deltas:
p = deltas.index(t)
sample[k].append(p)
t = t.replace("<{}_{}>".format(k, i), '').replace("<\\{}_{}>".format(k, i), '')
sample["source_tokens"][p] = t
deltas[p] = t
else:
return ""
sample["error_location"] = sample["error_location"][0]
return sample
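# Example (editor's addition): markers wrap the tracked tokens so that their
# positions can be recovered after ddmin removes deltas. A token "x" at
# error_location index 0 is stored as "<error_location_0>x<\error_location_0>";
# update_marker_positions() locates it in the reduced token list, records its
# new index, and strips the wrapper before evaluation.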
def get_pretty_sample(sample):
sample.pop("edges")
sample.pop("marker_tokens")
# sample = json.dumps(sample, indent=4, separators=(',', ': '))
sample = json.dumps(sample)
return sample
def filter_keys(sample):
all_keys = list(sample.keys())
for k in all_keys:
if k not in sample_keys:
sample.pop(k)
return sample
def get_dd_json_data(time, n_pass, result, sample):
pred, loss, accuracy = result[0], result[1], result[2]
loc_pred, rep_pred, tar_pred = pred[0][0], pred[1][0], pred[2][0]
sample = filter_keys(sample.copy())
error_location_pred = [p for i, p in enumerate(loc_pred) if i in [sample["error_location"]]]
repair_targets_pred = [p for i, p in enumerate(rep_pred) if i in sample["repair_targets"]]
repair_candidates_pred = [p for i, p in enumerate(rep_pred) if i in sample["repair_candidates"]]
j_data = []
data1 = {"result": {
"time": time, "n_pass": n_pass, "n_token": len(sample["source_tokens"]),
"loss": loss, "accuracy": accuracy
}}
data2 = {"sample": {
"has_bug": sample["has_bug"],
"source_tokens": sample["source_tokens"]
}}
data3 = {"position": {
"error_location": sample["error_location"],
"repair_targets": sample["repair_targets"],
"repair_candidates": sample["repair_candidates"]
}}
data4 = {"prediction": {
"error_location": error_location_pred[0],
"repair_targets": repair_targets_pred,
"repair_candidates": repair_candidates_pred,
"target_probs": tar_pred
}}
for j in [data1, data2, data3, data4]:
j_data += [json.dumps(j)]
return j_data
def save_simplified_code(all_methods, output_file):
open(output_file, 'w').close()
with open(output_file, 'a') as f:
for jCode in all_methods:
print(jCode)
f.write(jCode)
f.write("\n")
| 6,095 | 30.42268 | 102 |
py
|
SIVAND
|
SIVAND-master/models/dd-great/attn_model.py
|
import sys
sys.path.append('.')
sys.path.insert(0, "..")
import yaml
import tensorflow as tf
from checkpoint_tracker import Tracker
from data import data_loader, vocabulary
from meta_model import VarMisuseModel
import sm_helper as hp
###############################################################
g_model = None
g_data = None
g_all_data = []
g_cnt_dict = {}
###############################################################
def evaluate_single_data(data, model):
losses, accs, preds, attns = [], [], [], [] # get_metrics()
for batch in data.batcher(mode='eval'):
tokens, edges, error_loc, repair_targets, repair_candidates = batch
token_mask = tf.clip_by_value(tf.reduce_sum(tokens, -1), 0, 1)
pointer_preds, attns = model(tokens, token_mask, edges, training=False)
batch_loss, batch_acc, batch_pred = model.get_loss(pointer_preds, token_mask, error_loc, repair_targets,
repair_candidates)
losses, accs, preds = batch_loss, batch_acc, batch_pred
break # single sample only
accs = [a.numpy().tolist() for a in accs]
losses = [l.numpy().tolist() for l in losses]
preds = [p.numpy().tolist() for p in preds]
attns = [a.numpy().tolist() for a in attns]
return [attns, preds, losses, accs]
###############################################################
if __name__ == '__main__':
config = yaml.safe_load(open(hp.config_path))
print("Configuration:", config)
hp.set_model_json_from_configuration(config["model"]["configuration"])
if hp.vm_model_path is None:
raise ValueError("Must provide a path to pre-trained models when running final evaluation")
data = data_loader.DataLoader(hp.data_path, config["data"], vocabulary.Vocabulary(hp.vocabulary_path))
model = VarMisuseModel(config['model'], data.vocabulary.vocab_dim)
model.run_dummy_input()
tracker = Tracker(model, hp.vm_model_path)
tracker.restore_single_ckpt()
# g_data/g_model for DD
g_data, g_model = data, model
# apply_dd_eval(g_data, g_model)
js_count, dd_count = 0, 0
with open(hp.vm_json_path) as file:
for line in file:
if not line.strip(): continue
js_count += 1
try:
print("\nStart: {}\n".format(js_count))
g_all_data.clear()
g_cnt_pass = [0, 0, 0]
sample = hp.check_eval_tmp(line.strip())
assert sample["results"]["model"] == config["model"]["configuration"]
results = evaluate_single_data(data, model)
if hp.g_buggy_type == hp.g_buggy_types[0]: # buggy
assert results[-1][0] == 0 and results[-1][-1] == 1.0
else: # nonbuggy
assert results[-1][0] == 1.0 and results[-1][1] == 0 and results[-1][2] == 0
# original sample
g_all_data.append("\nOriginal sample:\n")
g_all_data.append(hp.get_pretty_sample(sample.copy()))
# save filename
save_name = "{}___L{}.txt".format(sample["txt_file"], sample["js_count"])
# attentions of tokens
sample_tokens = sample["source_tokens"]
g_all_data.append("\n\nAll source tokens:\n")
g_all_data.append(str(sample_tokens))
sample_attns = results[0][0]
g_all_data.append("\n\nAll attention probs:\n")
g_all_data.append(str(sample_attns))
# top-k index
attn_idx = sorted(range(len(sample_attns)), key=lambda i: sample_attns[i], reverse=True)[:10]
topk_tokens = [sample_tokens[i] for i in attn_idx]
g_all_data.append("\n\nTop-k source tokens:\n")
g_all_data.append(str(topk_tokens))
topk_attns = [sample_attns[i] for i in attn_idx]
g_all_data.append("\n\nTop-k attention probs:\n")
g_all_data.append(str(topk_attns))
# print/save all simplified code
dd_count += 1
output_file = hp.dd_file.format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone: {}-{}\n".format(js_count, dd_count))
if dd_count >= hp.g_dd_count:
quit()
except Exception as e:
print("\nError: {}\n{}".format(js_count, str(e)))
| 4,510 | 38.570175 | 112 |
py
|
SIVAND
|
SIVAND-master/models/dd-great/dd_model.py
|
import sys
sys.path.append('.')
sys.path.insert(0, "..")
import yaml
import tensorflow as tf
from checkpoint_tracker import Tracker
from data import data_loader, vocabulary
from meta_model import VarMisuseModel
import DD
import sm_helper as hp
###############################################################
g_model = None
g_data = None
g_cnt_pass = [0, 0, 0]
g_all_data = []
g_cnt_dict = {}
###############################################################
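# DD oracle helper: the reduced sample still "fails" (and should be kept) when
# the model's verdict matches the original buggy/nonbuggy prediction pattern.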
def check_dd_results(d_result):
if hp.g_buggy_type == hp.g_buggy_types[0]: # buggy
return d_result[-1][0] == 0 and d_result[-1][-1] == 1.0
elif hp.g_buggy_type == hp.g_buggy_types[1]: # nonbuggy
return d_result[-1][0] == 1.0 and d_result[-1][1] == 0 and d_result[-1][2] == 0
else:
return False
class MyDD(DD.DD):
def __init__(self):
DD.DD.__init__(self)
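    # ddmin contract: FAIL means the reduced input still reproduces the
    # original prediction, so DD keeps shrinking it; PASS means it does not.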
def _test(self, deltas):
if not deltas:
return self.PASS
try:
g_cnt_pass[0] = g_cnt_pass[0] + 1
d_tokens = [d[1] for d in deltas]
d_sample = hp.update_marker_positions(d_tokens[:])
            if len(d_sample) > 0:  # TODO: also check that the reduced sample parses?
g_cnt_pass[1] = g_cnt_pass[1] + 1
hp.set_eval_tmp_json(d_sample)
d_result = evaluate_single_data(g_data, g_model)
time = hp.get_current_time()
print('time = {}, result = {}'.format(time, d_result))
if check_dd_results(d_result):
g_cnt_pass[2] = g_cnt_pass[2] + 1
hp.g_sample = d_sample.copy()
j_data = hp.get_dd_json_data(time, g_cnt_pass, d_result, d_sample.copy())
for j in j_data:
g_all_data.append(j)
g_all_data.append('\n')
return self.FAIL
except:
pass
return self.PASS
###############################################################
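# Single-sample evaluation, as above but without attention collection:
# returns [preds, losses, accs].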
def evaluate_single_data(data, model):
losses, accs, preds = [], [], [] # get_metrics()
for batch in data.batcher(mode='eval'):
tokens, edges, error_loc, repair_targets, repair_candidates = batch
token_mask = tf.clip_by_value(tf.reduce_sum(tokens, -1), 0, 1)
pointer_preds, _ = model(tokens, token_mask, edges, training=False)
batch_loss, batch_acc, batch_pred = model.get_loss(pointer_preds, token_mask, error_loc, repair_targets,
repair_candidates)
losses, accs, preds = batch_loss, batch_acc, batch_pred
break # single sample only
accs = [a.numpy().tolist() for a in accs]
losses = [l.numpy().tolist() for l in losses]
preds = [p.numpy().tolist() for p in preds]
return [preds, losses, accs]
###############################################################
if __name__ == '__main__':
config = yaml.safe_load(open(hp.config_path))
print("Configuration:", config)
hp.set_model_json_from_configuration(config["model"]["configuration"])
if hp.vm_model_path is None:
raise ValueError("Must provide a path to pre-trained models when running final evaluation")
data = data_loader.DataLoader(hp.data_path, config["data"], vocabulary.Vocabulary(hp.vocabulary_path))
model = VarMisuseModel(config['model'], data.vocabulary.vocab_dim)
model.run_dummy_input()
tracker = Tracker(model, hp.vm_model_path)
tracker.restore_single_ckpt()
# g_data/g_model for DD
g_data, g_model = data, model
# apply_dd_eval(g_data, g_model)
js_count, dd_count = 0, 0
with open(hp.vm_json_path) as file:
for line in file:
if not line.strip(): continue
js_count += 1
try:
print("\nStart: {}\n".format(js_count))
g_all_data.clear()
g_cnt_pass = [0, 0, 0]
sample = hp.check_eval_tmp(line.strip())
assert sample["results"]["model"] == config["model"]["configuration"]
results = evaluate_single_data(data, model)
if hp.g_buggy_type == hp.g_buggy_types[0]: # buggy
assert results[-1][0] == 0 and results[-1][-1] == 1.0
else: # nonbuggy
assert results[-1][0] == 1.0 and results[-1][1] == 0 and results[-1][2] == 0
# original sample
g_all_data.append("\nOriginal sample:\n")
g_all_data.append(hp.get_pretty_sample(sample.copy()))
# save filename
save_name = "{}___L{}.txt".format(sample["txt_file"], sample["js_count"])
# create deltas by tokens
tokens = list(sample["marker_tokens"])
deltas = list(zip(range(len(tokens)), tokens))
try:
# run ddmin
mydd = MyDD()
print("Simplifying failure-inducing input...")
g_all_data.append("\n\nTrace of simplified code(s):\n")
c = mydd.ddmin(deltas) # Invoke DDMIN
print("The 1-minimal failure-inducing input is", c)
print("Removing any element will make the failure go away.")
                    # convert the 1-minimal deltas back into tokens
c = [d[1] for d in c]
s = hp.update_marker_positions(c[:])
g_all_data.append("\n\nMinimal simplified tokens:\n")
g_all_data.append(str(s["source_tokens"]))
dd_count += 1
except Exception as e:
g_all_data.append("\n\nException:\n{}".format(str(e)))
# print/save all simplified code
output_file = hp.dd_file.format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone: {}-{}\n".format(js_count, dd_count))
if dd_count >= hp.g_dd_count:
quit()
except Exception as e:
print("\nError: {}\n{}".format(js_count, str(e)))
| 6,118 | 36.310976 | 112 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2seq/dd_code2seq.py
|
from argparse import ArgumentParser
import numpy as np
import tensorflow as tf
from config import Config
from dd_model import Model
import DD
import sm_helper as hp
###############################################################
g_model = None
g_original_method_name = None
g_predicted_method_name = None
g_cnt_pass = [0, 0, 0]
g_all_data = []
g_cnt_dict = {}
###############################################################
def deltas_to_code(d):
s = "".join([c[1] for c in d])
return s
class MyDD(DD.DD):
def __init__(self):
DD.DD.__init__(self)
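    # ddmin oracle: FAIL iff the reduced code still parses with the method
    # name intact and code2seq still emits the original prediction.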
def _test(self, deltas):
if not deltas:
return self.PASS
try:
g_cnt_pass[0] = g_cnt_pass[0] + 1
code = deltas_to_code(deltas[:])
hp.store_method(hp.g_simp_file, code)
if hp.is_parsable(code, g_original_method_name, hp.g_simp_file):
g_cnt_pass[1] = g_cnt_pass[1] + 1
predict, score, loss = hp.prediction_with_c2s(g_model, hp.g_root_path, hp.g_simp_file)
time = hp.get_current_time()
print('time = {}, predict = {}, score = {}, loss = {}'.format(time, predict, score, loss))
if predict == g_predicted_method_name:
g_cnt_pass[2] = g_cnt_pass[2] + 1
j_data = hp.get_json_data(time, score, loss, code, deltas[:], g_cnt_pass)
g_all_data.append("{}".format(j_data))
return self.FAIL
except:
pass
return self.PASS
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("-d", "--data", dest="data_path",
help="path to preprocessed dataset", required=False)
parser.add_argument("-te", "--test", dest="test_path",
help="path to test file", metavar="FILE", required=False)
parser.add_argument("-s", "--save_prefix", dest="save_path_prefix",
help="path to save file", metavar="FILE", required=False)
parser.add_argument("-l", "--load", dest="load_path",
help="path to saved file", metavar="FILE", required=False)
parser.add_argument('--release', action='store_true',
help='release the loaded model for a smaller model size.')
parser.add_argument('--predict', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--seed', type=int, default=239)
args = parser.parse_args()
np.random.seed(args.seed)
tf.set_random_seed(args.seed)
if args.debug:
config = Config.get_debug_config(args)
else:
config = Config.get_default_config(args)
g_model = Model(config)
print('Created model')
assert g_model is not None
# read file
file_list = hp.get_file_list()
for idx, java_file in enumerate(file_list):
print("\nStart [{}]: {}\n".format(idx + 1, java_file))
g_all_data.clear()
g_cnt_pass = [0, 0, 0]
try:
# method_name and method_body
g_all_data.append("\npath = {}".format(java_file))
method_name, method_body = hp.load_method(java_file)
assert (len(method_name) > 0) and (len(method_body) > 0)
g_cnt_dict[method_name] = g_cnt_dict.get(method_name, 0) + 1
g_all_data.append("method_name = {}".format(method_name))
g_all_data.append("method_body = {}".format(method_body))
hp.store_method(hp.g_simp_file, method_body)
# set predicted method_name as global method_name
g_original_method_name = method_name
predict, score, loss = hp.prediction_with_c2s(g_model, hp.g_root_path, hp.g_simp_file)
g_predicted_method_name = predict
g_all_data.append("predict, score, loss = {}, {}, {}".format(predict, score, loss))
assert g_original_method_name == g_predicted_method_name
# create deltas by char/token
deltas = []
if hp.g_deltas_type == "token":
deltas = hp.get_token_deltas(method_body)
else:
deltas = hp.get_char_deltas(method_body)
try:
# run ddmin
mydd = MyDD()
print("Simplifying failure-inducing input...")
g_all_data.append("\nTrace of simplified code(s):")
c = mydd.ddmin(deltas) # Invoke DDMIN
print("The 1-minimal failure-inducing input is", c)
print("Removing any element will make the failure go away.")
javaCode = deltas_to_code(c)
g_all_data.append("\nMinimal simplified code:\n{}".format(javaCode))
except Exception as e:
g_all_data.append("\nException:\n{}".format(str(e)))
# print/save all simplified code
save_name = "L{}_{}_{}.txt".format(str(idx + 1), method_name, g_cnt_dict[method_name])
output_file = "dd_data/{}".format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone [{}]: {}\n".format(idx + 1, java_file))
except:
print("\nError [{}]: {}\n".format(idx + 1, java_file))
g_model.close_session()
| 5,296 | 37.384058 | 106 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2seq/sm_helper.py
|
import json
import re
import statistics
import subprocess
from datetime import datetime
import javalang
import pandas as pd
###############################################################
# TODO: all (if)
g_root_path = "/scratch/rabin/deployment/root-simplify/sm-code2seq"
g_c2s_model = "/scratch/rabin/models/code2seq/main/java-large/saved_model_iter52"
g_files_root = "/scratch/rabin/deployment/root-simplify/data_selection"
g_test_files = {
"correct_samefile": g_files_root + "/mn_c2x/c2x_jl_test_correct_prediction_samefile.txt"
}
g_test_loc = "correct_samefile"
g_test_file = g_test_files[g_test_loc]
g_c2s_test = g_root_path + "/data/sm/sm.test.c2s"
g_db_name = "sm"
JAR_LOAD_JAVA_METHOD = g_files_root + "/LoadJavaMethod.jar"
JAR_C2S_JAVA_EXTRACTOR = g_root_path + "/JavaExtractor/JPredict/target/JavaExtractor-0.0.1-SNAPSHOT.jar"
###############################################################
# TODO: DD (if)
g_deltas_types = ["token", "char"]
g_deltas_type = g_deltas_types[0]
g_simp_file = "data/tmp/sm_test.java"
###############################################################
# TODO: attn (if)
g_topk_attn = 200 # topk attentions
###############################################################
def get_file_list():
file_list = []
try:
df = pd.read_csv(g_test_file)
file_list = df["path"].tolist()[:1000]
except:
pass
return file_list
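# Name helpers: code2seq represents names as subtokens joined by '|', e.g.
# get_subtoken_name("getFileName") -> 'get|file|name', and
# get_flat_name('get|file|name') -> 'getFileName'.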
def get_subtoken_name(name):
name = '|'.join(re.sub(r"([A-Z])", r" \1", name).split())
return name.lower()
def get_flat_name(name):
name = name.split('|')
if len(name) < 2:
return ''.join(name)
else:
return name[0] + ''.join([x.capitalize() for x in name[1:]])
def fix_internal_delimiter(name): # a|bb|ccc
name = name.split('|')
name = [name[0]] + [n.title() for n in name[1:]]
return "".join(name) # aBbCcc
def get_current_time():
return str(datetime.now())
def is_parsable(src, original_method_name, java_file):
try:
tree = javalang.parse.parse("class Test { " + src + " }")
assert tree is not None
method_name, _ = load_method(java_file)
assert method_name == original_method_name
except:
return False
return True
def load_method(java_file):
try:
cmd = ['java', '-jar', JAR_LOAD_JAVA_METHOD, java_file]
contents = subprocess.check_output(cmd, encoding="utf-8", close_fds=True)
contents = contents.split()
method_name = contents[0]
method_body = " ".join(contents[1:])
return method_name, method_body
except:
return "", ""
def store_method(dd_file, method_body):
with open(dd_file, "w") as f:
f.write(method_body + "\n")
def prediction_with_c2s(model, root_path, java_file):
try:
# preprocess data
open(g_c2s_test, 'w').close()
cmd = ['/bin/sh', g_root_path + '/preprocess_test.sh',
root_path, java_file, g_db_name] # source --> /bin/sh
subprocess.call(cmd, close_fds=True)
c2s_after = open(g_c2s_test, 'r').read()
assert len(c2s_after.strip()) > 0
# evaluate model
result = model.evaluate()
assert result is not None and len(result) > 0
[predict, score, loss] = result
return get_flat_name(predict), statistics.mean(score), float(loss)
    except:
        return "", 0.0, 0.0  # keep a 3-tuple so callers can unpack on failure
def save_simplified_code(all_methods, output_file):
open(output_file, 'w').close()
with open(output_file, 'a') as f:
for jCode in all_methods:
print(jCode)
f.write(jCode + "\n")
f.write("\n")
def get_json_data(time, score, loss, code, tokens=None, n_pass=None):
score, loss = str(round(float(score), 4)), str(round(float(loss), 4))
data = {'time': time, 'score': score, 'loss': loss, 'code': code}
if tokens:
data['n_tokens'] = len(tokens)
if n_pass:
data['n_pass'] = n_pass
j_data = json.dumps(data)
return j_data
###############################################################
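# Delta constructors for ddmin: a delta is an (index, fragment) pair at
# character, token, or fixed-size-substring granularity.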
def get_char_deltas(program):
    data = list(program)  # e.g. ['a', ..., 'z']
    deltas = list(zip(range(len(data)), data))  # [(0, 'a'), ..., (n, 'z')]
return deltas
def get_token_deltas(program):
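    # Split on every non-alphabetic character so whitespace and punctuation
    # become their own deltas, e.g. "int a;" -> [(0, 'int'), (1, ' '), (2, 'a'), (3, ';')].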
token, tokens = "", []
for c in program:
if not c.isalpha():
tokens.append(token)
tokens.append(c)
token = ""
else:
token = token + c
tokens.append(token)
tokens = [token for token in tokens if len(token) != 0]
deltas = list(zip(range(len(tokens)), tokens))
return deltas
def get_substr_deltas(program, n):
substrs = [program[i: i + n] for i in range(0, len(program), n)]
deltas = list(zip(range(len(substrs)), substrs))
return deltas
###############################################################
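# Maps code2seq's abbreviated AST node labels back to full JavaParser-style
# node names; used to expand compressed path-contexts into readable form.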
c2s_ast_map = {'ArAc': 'ArrayAccessExpr', 'ArBr': 'ArrayBracketPair', 'ArCr': 'ArrayCreationExpr',
'ArCrLvl': 'ArrayCreationLevel', 'ArIn': 'ArrayInitializerExpr', 'ArTy': 'ArrayType',
'Asrt': 'AssertStmt', 'AsAn': 'AssignExpr:and', 'As': 'AssignExpr:assign', 'AsLS': 'AssignExpr:lShift',
'AsMi': 'AssignExpr:minus', 'AsOr': 'AssignExpr:or', 'AsP': 'AssignExpr:plus', 'AsRe': 'AssignExpr:rem',
'AsRSS': 'AssignExpr:rSignedShift', 'AsRUS': 'AssignExpr:rUnsignedShift', 'AsSl': 'AssignExpr:slash',
'AsSt': 'AssignExpr:star', 'AsX': 'AssignExpr:xor', 'And': 'BinaryExpr:and',
'BinAnd': 'BinaryExpr:binAnd', 'BinOr': 'BinaryExpr:binOr', 'Div': 'BinaryExpr:divide',
'Eq': 'BinaryExpr:equals', 'Gt': 'BinaryExpr:greater', 'Geq': 'BinaryExpr:greaterEquals',
'Ls': 'BinaryExpr:less', 'Leq': 'BinaryExpr:lessEquals', 'LS': 'BinaryExpr:lShift',
'Minus': 'BinaryExpr:minus', 'Neq': 'BinaryExpr:notEquals', 'Or': 'BinaryExpr:or',
'Plus': 'BinaryExpr:plus', 'Mod': 'BinaryExpr:remainder', 'RSS': 'BinaryExpr:rSignedShift',
'RUS': 'BinaryExpr:rUnsignedShift', 'Mul': 'BinaryExpr:times', 'Xor': 'BinaryExpr:xor',
'Bk': 'BlockStmt', 'BoolEx': 'BooleanLiteralExpr', 'Cast': 'CastExpr', 'Catch': 'CatchClause',
'CharEx': 'CharLiteralExpr', 'ClsEx': 'ClassExpr', 'ClsD': 'ClassOrInterfaceDeclaration',
'Cls': 'ClassOrInterfaceType', 'Cond': 'ConditionalExpr', 'Ctor': 'ConstructorDeclaration',
'Do': 'DoStmt', 'Dbl': 'DoubleLiteralExpr', 'Emp': 'EmptyMemberDeclaration', 'Enc': 'EnclosedExpr',
'ExpCtor': 'ExplicitConstructorInvocationStmt', 'Ex': 'ExpressionStmt', 'Fld': 'FieldAccessExpr',
'FldDec': 'FieldDeclaration', 'Foreach': 'ForeachStmt', 'For': 'ForStmt', 'If': 'IfStmt',
'Init': 'InitializerDeclaration', 'InstanceOf': 'InstanceOfExpr', 'IntEx': 'IntegerLiteralExpr',
'IntMinEx': 'IntegerLiteralMinValueExpr', 'Labeled': 'LabeledStmt', 'Lambda': 'LambdaExpr',
'LongEx': 'LongLiteralExpr', 'MarkerExpr': 'MarkerAnnotationExpr', 'Mvp': 'MemberValuePair',
'Cal': 'MethodCallExpr', 'Mth': 'MethodDeclaration', 'MethRef': 'MethodReferenceExpr', 'Nm': 'NameExpr',
'NormEx': 'NormalAnnotationExpr', 'Null': 'NullLiteralExpr', 'ObjEx': 'ObjectCreationExpr',
'Prm': 'Parameter', 'Prim': 'PrimitiveType', 'Qua': 'QualifiedNameExpr', 'Ret': 'ReturnStmt',
'SMEx': 'SingleMemberAnnotationExpr', 'StrEx': 'StringLiteralExpr', 'SupEx': 'SuperExpr',
'SwiEnt': 'SwitchEntryStmt', 'Switch': 'SwitchStmt', 'Sync': 'SynchronizedStmt', 'This': 'ThisExpr',
'Thro': 'ThrowStmt', 'Try': 'TryStmt', 'TypeDec': 'TypeDeclarationStmt', 'Type': 'TypeExpr',
'TypePar': 'TypeParameter', 'Inverse': 'UnaryExpr:inverse', 'Neg': 'UnaryExpr:negative',
'Not': 'UnaryExpr:not', 'PosDec': 'UnaryExpr:posDecrement', 'PosInc': 'UnaryExpr:posIncrement',
'Pos': 'UnaryExpr:positive', 'PreDec': 'UnaryExpr:preDecrement', 'PreInc': 'UnaryExpr:preIncrement',
'Unio': 'UnionType', 'VDE': 'VariableDeclarationExpr', 'VD': 'VariableDeclarator',
'VDID': 'VariableDeclaratorId', 'Void': 'VoidType', 'While': 'WhileStmt', 'Wild': 'WildcardType'}
def get_full_path_context(short_path_context):
full_ast_path = ''
for short_ast_node in short_path_context[1].split('|'):
node_parts = re.split(r'(\d+)', short_ast_node)
        node_parts = [n for n in node_parts if n]  # drop empty strings produced by re.split
node_parts[0] = c2s_ast_map[node_parts[0]]
full_ast_path += ''.join(node_parts) + '|'
return "{},{},{}".format(short_path_context[0], full_ast_path[:-1], short_path_context[-1])
def get_attention(model, method_name, root_path, java_file):
try:
# preprocess data
open(g_c2s_test, 'w').close()
cmd = ['/bin/sh', 'preprocess_test.sh',
root_path, java_file, g_db_name] # source --> /bin/sh
subprocess.call(cmd, close_fds=True)
c2s_after = open(g_c2s_test, 'r').read()
assert len(c2s_after.strip()) > 0
# set topk for attention
ast_paths = c2s_after.strip().split()[1:]
l_topk_attn = len(ast_paths) if len(ast_paths) < g_topk_attn else g_topk_attn
# get topk path-context
attentions = model.get_attention()
assert attentions is not None
attn_tokens = get_subtoken_name(method_name).split('|')
topk_attns, topk_paths = [], []
for attn_idx in range(len(attn_tokens)):
sub_attn = attentions[attn_idx][:l_topk_attn]
topk_idx = sorted(range(len(sub_attn)), key=sub_attn.__getitem__, reverse=True)[:l_topk_attn]
topk_path = [ast_paths[i] for i in topk_idx]
topk_paths.append(topk_path)
topk_attn = [sub_attn[i] for i in topk_idx]
topk_attns.append(topk_attn)
return attn_tokens, topk_attns, topk_paths
except:
return ""
| 10,010 | 38.72619 | 119 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2seq/attn_code2seq.py
|
from argparse import ArgumentParser
import numpy as np
import tensorflow as tf
from config import Config
from dd_model import Model
import sm_helper as hp
###############################################################
g_model = None
g_all_data = []
g_cnt_dict = {}
###############################################################
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("-d", "--data", dest="data_path",
help="path to preprocessed dataset", required=False)
parser.add_argument("-te", "--test", dest="test_path",
help="path to test file", metavar="FILE", required=False)
parser.add_argument("-s", "--save_prefix", dest="save_path_prefix",
help="path to save file", metavar="FILE", required=False)
parser.add_argument("-l", "--load", dest="load_path",
help="path to saved file", metavar="FILE", required=False)
parser.add_argument('--release', action='store_true',
help='release the loaded model for a smaller model size.')
parser.add_argument('--predict', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--seed', type=int, default=239)
args = parser.parse_args()
np.random.seed(args.seed)
tf.set_random_seed(args.seed)
if args.debug:
config = Config.get_debug_config(args)
else:
config = Config.get_default_config(args)
g_model = Model(config)
print('Created model')
assert g_model is not None
# read file
file_list = hp.get_file_list()
for idx, java_file in enumerate(file_list):
print("\nStart [{}]: {}\n".format(idx + 1, java_file))
g_all_data.clear()
try:
# method_name and method_body
g_all_data.append("\npath = {}".format(java_file))
method_name, method_body = hp.load_method(java_file)
assert (len(method_name) > 0) and (len(method_body) > 0)
g_cnt_dict[method_name] = g_cnt_dict.get(method_name, 0) + 1
g_all_data.append("method_name = {}".format(method_name))
g_all_data.append("method_body = {}".format(method_body))
hp.store_method(hp.g_simp_file, method_body)
# check if prediction is correct
predict, _, _ = hp.prediction_with_c2s(g_model, hp.g_root_path, hp.g_simp_file)
assert method_name == predict
# get path-context and attention
attn_tokens, topk_attns, topk_paths = hp.get_attention(g_model, method_name, hp.g_root_path, hp.g_simp_file)
for i in range(len(topk_paths)):
g_all_data.append("\ntopk path-contexts for subtoken-{}: {}".format(i + 1, attn_tokens[i]))
topk_terminal = []
for j in range(len(topk_paths[i])):
short_path_context = topk_paths[i][j].strip().split(',')
full_path_context = hp.get_full_path_context(short_path_context)
topk_terminal.append([short_path_context[0], short_path_context[-1]])
g_all_data.append("[{}] {}".format(f"{topk_attns[i][j]:.4f}", full_path_context))
g_all_data.append(
"\ntopk terminals for subtoken-{}: {}\n{}".format(i + 1, attn_tokens[i], topk_terminal))
# print/save path-context and attention
save_name = "L{}_{}_{}.txt".format(str(idx + 1), method_name, g_cnt_dict[method_name])
output_file = "attn_data/{}".format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone [{}]: {}\n".format(idx + 1, java_file))
except:
print("\nError [{}]: {}\n".format(idx + 1, java_file))
g_model.close_session()
| 3,832 | 42.556818 | 120 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2seq/dd_model.py
|
import _pickle as pickle
import tensorflow as tf
import reader
from common import Common
class Model:
topk = 10
num_batches_to_log = 100
def __init__(self, config):
self.config = config
self.sess = tf.Session()
self.eval_queue = None
self.predict_queue = None
self.eval_placeholder = None
self.predict_placeholder = None
self.eval_predicted_indices_op, self.eval_top_values_op, self.eval_true_target_strings_op, \
self.eval_topk_values, self.eval_attentions_op, self.eval_losses_op = None, None, None, None, None, None
self.predict_top_indices_op, self.predict_top_scores_op, self.predict_target_strings_op = None, None, None
self.subtoken_to_index = None
if config.LOAD_PATH:
self.load_model(sess=None)
else:
with open('{}.dict.c2s'.format(config.TRAIN_PATH), 'rb') as file:
subtoken_to_count = pickle.load(file)
node_to_count = pickle.load(file)
target_to_count = pickle.load(file)
max_contexts = pickle.load(file)
self.num_training_examples = pickle.load(file)
print('Dictionaries loaded.')
if self.config.DATA_NUM_CONTEXTS <= 0:
self.config.DATA_NUM_CONTEXTS = max_contexts
self.subtoken_to_index, self.index_to_subtoken, self.subtoken_vocab_size = \
Common.load_vocab_from_dict(subtoken_to_count, add_values=[Common.PAD, Common.UNK],
max_size=config.SUBTOKENS_VOCAB_MAX_SIZE)
print('Loaded subtoken vocab. size: %d' % self.subtoken_vocab_size)
self.target_to_index, self.index_to_target, self.target_vocab_size = \
Common.load_vocab_from_dict(target_to_count, add_values=[Common.PAD, Common.UNK, Common.SOS],
max_size=config.TARGET_VOCAB_MAX_SIZE)
print('Loaded target word vocab. size: %d' % self.target_vocab_size)
self.node_to_index, self.index_to_node, self.nodes_vocab_size = \
Common.load_vocab_from_dict(node_to_count, add_values=[Common.PAD, Common.UNK], max_size=None)
print('Loaded nodes vocab. size: %d' % self.nodes_vocab_size)
self.epochs_trained = 0
# move from evaluate to here
if self.eval_queue is None:
self.eval_queue = reader.Reader(subtoken_to_index=self.subtoken_to_index,
node_to_index=self.node_to_index,
target_to_index=self.target_to_index,
config=self.config, is_evaluating=True)
reader_output = self.eval_queue.get_output()
self.eval_predicted_indices_op, self.eval_topk_values, _, self.eval_attentions_op, self.eval_losses_op = \
self.build_test_graph(reader_output)
self.eval_true_target_strings_op = reader_output[reader.TARGET_STRING_KEY]
self.saver = tf.train.Saver(max_to_keep=10)
if self.config.LOAD_PATH and not self.config.TRAIN_PATH:
self.initialize_session_variables(self.sess)
self.load_model(self.sess)
def close_session(self):
self.sess.close()
def evaluate(self):
self.eval_queue.reset(self.sess)
try:
while True:
predicted_indices, true_target_strings, top_values, losses = self.sess.run(
[self.eval_predicted_indices_op, self.eval_true_target_strings_op,
self.eval_topk_values, self.eval_losses_op],
)
true_target_strings = Common.binary_to_string_list(true_target_strings)
if self.config.BEAM_WIDTH > 0:
# predicted indices: (batch, time, beam_width)
predicted_strings = [[[self.index_to_target[i] for i in timestep] for timestep in example] for
example in predicted_indices]
predicted_strings = [list(map(list, zip(*example))) for example in
predicted_strings] # (batch, top-k, target_length)
else:
predicted_strings = [[self.index_to_target[i] for i in example]
for example in predicted_indices]
return self.update_correct_predictions(zip(true_target_strings, predicted_strings, top_values, [losses]))
except tf.errors.OutOfRangeError:
pass
def get_attention(self):
self.eval_queue.reset(self.sess)
try:
while True:
attentions, losses = self.sess.run(
[self.eval_attentions_op, self.eval_losses_op],
)
return attentions
except tf.errors.OutOfRangeError:
pass
def update_correct_predictions(self, results):
try:
for original_name, predicted, top_values, losses in results:
original_name_parts = original_name.split(Common.internal_delimiter) # list
predicted_first = predicted
loss_first = losses
value_first = top_values
if self.config.BEAM_WIDTH > 0:
predicted_first = predicted[0]
loss_first = losses[0]
value_first = top_values[0]
filtered_predicted_first_parts = Common.filter_impossible_names(predicted_first) # list
predicted_first_join = Common.internal_delimiter.join(filtered_predicted_first_parts)
value_first_parts = [val[0] for val in value_first[:len(filtered_predicted_first_parts)]]
rtn_results = [predicted_first_join, value_first_parts, loss_first]
return rtn_results # list
except:
return ""
def decode_outputs(self, target_words_vocab, target_input, batch_size, batched_contexts, valid_mask,
is_evaluating=False):
num_contexts_per_example = tf.count_nonzero(valid_mask, axis=-1)
start_fill = tf.fill([batch_size],
self.target_to_index[Common.SOS]) # (batch, )
decoder_cell = tf.nn.rnn_cell.MultiRNNCell([
tf.nn.rnn_cell.LSTMCell(self.config.DECODER_SIZE) for _ in range(self.config.NUM_DECODER_LAYERS)
])
contexts_sum = tf.reduce_sum(batched_contexts * tf.expand_dims(valid_mask, -1),
axis=1) # (batch_size, dim * 2 + rnn_size)
contexts_average = tf.divide(contexts_sum, tf.to_float(tf.expand_dims(num_contexts_per_example, -1)))
fake_encoder_state = tuple(tf.nn.rnn_cell.LSTMStateTuple(contexts_average, contexts_average) for _ in
range(self.config.NUM_DECODER_LAYERS))
projection_layer = tf.layers.Dense(self.target_vocab_size, use_bias=False)
if is_evaluating and self.config.BEAM_WIDTH > 0:
batched_contexts = tf.contrib.seq2seq.tile_batch(batched_contexts, multiplier=self.config.BEAM_WIDTH)
num_contexts_per_example = tf.contrib.seq2seq.tile_batch(num_contexts_per_example,
multiplier=self.config.BEAM_WIDTH)
attention_mechanism = tf.contrib.seq2seq.LuongAttention(
num_units=self.config.DECODER_SIZE,
memory=batched_contexts
)
# TF doesn't support beam search with alignment history
should_save_alignment_history = is_evaluating and self.config.BEAM_WIDTH == 0
decoder_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell, attention_mechanism,
attention_layer_size=self.config.DECODER_SIZE,
alignment_history=should_save_alignment_history)
if is_evaluating:
if self.config.BEAM_WIDTH > 0:
decoder_initial_state = decoder_cell.zero_state(dtype=tf.float32,
batch_size=batch_size * self.config.BEAM_WIDTH)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tf.contrib.seq2seq.tile_batch(fake_encoder_state, multiplier=self.config.BEAM_WIDTH))
decoder = tf.contrib.seq2seq.BeamSearchDecoder(
cell=decoder_cell,
embedding=target_words_vocab,
start_tokens=start_fill,
end_token=self.target_to_index[Common.PAD],
initial_state=decoder_initial_state,
beam_width=self.config.BEAM_WIDTH,
output_layer=projection_layer,
length_penalty_weight=0.0)
else:
helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(target_words_vocab, start_fill, 0)
initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=fake_encoder_state)
decoder = tf.contrib.seq2seq.BasicDecoder(cell=decoder_cell, helper=helper, initial_state=initial_state,
output_layer=projection_layer)
else:
decoder_cell = tf.nn.rnn_cell.DropoutWrapper(decoder_cell,
output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)
target_words_embedding = tf.nn.embedding_lookup(target_words_vocab,
tf.concat([tf.expand_dims(start_fill, -1), target_input],
axis=-1)) # (batch, max_target_parts, dim * 2 + rnn_size)
helper = tf.contrib.seq2seq.TrainingHelper(inputs=target_words_embedding,
sequence_length=tf.ones([batch_size], dtype=tf.int32) * (
self.config.MAX_TARGET_PARTS + 1))
initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=fake_encoder_state)
decoder = tf.contrib.seq2seq.BasicDecoder(cell=decoder_cell, helper=helper, initial_state=initial_state,
output_layer=projection_layer)
outputs, final_states, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(decoder,
maximum_iterations=self.config.MAX_TARGET_PARTS + 1)
return outputs, final_states
def calculate_path_abstraction(self, path_embed, path_lengths, valid_contexts_mask, is_evaluating=False):
return self.path_rnn_last_state(is_evaluating, path_embed, path_lengths, valid_contexts_mask)
def path_rnn_last_state(self, is_evaluating, path_embed, path_lengths, valid_contexts_mask):
# path_embed: (batch, max_contexts, max_path_length+1, dim)
# path_length: (batch, max_contexts)
# valid_contexts_mask: (batch, max_contexts)
max_contexts = tf.shape(path_embed)[1]
flat_paths = tf.reshape(path_embed, shape=[-1, self.config.MAX_PATH_LENGTH,
self.config.EMBEDDINGS_SIZE]) # (batch * max_contexts, max_path_length+1, dim)
flat_valid_contexts_mask = tf.reshape(valid_contexts_mask, [-1]) # (batch * max_contexts)
lengths = tf.multiply(tf.reshape(path_lengths, [-1]),
tf.cast(flat_valid_contexts_mask, tf.int32)) # (batch * max_contexts)
if self.config.BIRNN:
rnn_cell_fw = tf.nn.rnn_cell.LSTMCell(self.config.RNN_SIZE / 2)
rnn_cell_bw = tf.nn.rnn_cell.LSTMCell(self.config.RNN_SIZE / 2)
if not is_evaluating:
rnn_cell_fw = tf.nn.rnn_cell.DropoutWrapper(rnn_cell_fw,
output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)
rnn_cell_bw = tf.nn.rnn_cell.DropoutWrapper(rnn_cell_bw,
output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)
_, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=rnn_cell_fw,
cell_bw=rnn_cell_bw,
inputs=flat_paths,
dtype=tf.float32,
sequence_length=lengths)
final_rnn_state = tf.concat([state_fw.h, state_bw.h], axis=-1) # (batch * max_contexts, rnn_size)
else:
rnn_cell = tf.nn.rnn_cell.LSTMCell(self.config.RNN_SIZE)
if not is_evaluating:
rnn_cell = tf.nn.rnn_cell.DropoutWrapper(rnn_cell, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)
_, state = tf.nn.dynamic_rnn(
cell=rnn_cell,
inputs=flat_paths,
dtype=tf.float32,
sequence_length=lengths
)
final_rnn_state = state.h # (batch * max_contexts, rnn_size)
return tf.reshape(final_rnn_state,
shape=[-1, max_contexts, self.config.RNN_SIZE]) # (batch, max_contexts, rnn_size)
def compute_contexts(self, subtoken_vocab, nodes_vocab, source_input, nodes_input,
target_input, valid_mask, path_source_lengths, path_lengths, path_target_lengths,
is_evaluating=False):
source_word_embed = tf.nn.embedding_lookup(params=subtoken_vocab,
ids=source_input) # (batch, max_contexts, max_name_parts, dim)
path_embed = tf.nn.embedding_lookup(params=nodes_vocab,
ids=nodes_input) # (batch, max_contexts, max_path_length+1, dim)
target_word_embed = tf.nn.embedding_lookup(params=subtoken_vocab,
ids=target_input) # (batch, max_contexts, max_name_parts, dim)
source_word_mask = tf.expand_dims(
tf.sequence_mask(path_source_lengths, maxlen=self.config.MAX_NAME_PARTS, dtype=tf.float32),
-1) # (batch, max_contexts, max_name_parts, 1)
target_word_mask = tf.expand_dims(
tf.sequence_mask(path_target_lengths, maxlen=self.config.MAX_NAME_PARTS, dtype=tf.float32),
-1) # (batch, max_contexts, max_name_parts, 1)
source_words_sum = tf.reduce_sum(source_word_embed * source_word_mask,
axis=2) # (batch, max_contexts, dim)
path_nodes_aggregation = self.calculate_path_abstraction(path_embed, path_lengths, valid_mask,
is_evaluating) # (batch, max_contexts, rnn_size)
target_words_sum = tf.reduce_sum(target_word_embed * target_word_mask, axis=2) # (batch, max_contexts, dim)
context_embed = tf.concat([source_words_sum, path_nodes_aggregation, target_words_sum],
axis=-1) # (batch, max_contexts, dim * 2 + rnn_size)
if not is_evaluating:
context_embed = tf.nn.dropout(context_embed, self.config.EMBEDDINGS_DROPOUT_KEEP_PROB)
batched_embed = tf.layers.dense(inputs=context_embed, units=self.config.DECODER_SIZE,
activation=tf.nn.tanh, trainable=not is_evaluating, use_bias=False)
return batched_embed
def build_test_graph(self, input_tensors):
target_index = input_tensors[reader.TARGET_INDEX_KEY]
target_lengths = input_tensors[reader.TARGET_LENGTH_KEY]
path_source_indices = input_tensors[reader.PATH_SOURCE_INDICES_KEY]
node_indices = input_tensors[reader.NODE_INDICES_KEY]
path_target_indices = input_tensors[reader.PATH_TARGET_INDICES_KEY]
valid_mask = input_tensors[reader.VALID_CONTEXT_MASK_KEY]
path_source_lengths = input_tensors[reader.PATH_SOURCE_LENGTHS_KEY]
path_lengths = input_tensors[reader.PATH_LENGTHS_KEY]
path_target_lengths = input_tensors[reader.PATH_TARGET_LENGTHS_KEY]
with tf.variable_scope('model', reuse=self.get_should_reuse_variables()):
subtoken_vocab = tf.get_variable('SUBTOKENS_VOCAB',
shape=(self.subtoken_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
target_words_vocab = tf.get_variable('TARGET_WORDS_VOCAB',
shape=(self.target_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
nodes_vocab = tf.get_variable('NODES_VOCAB',
shape=(self.nodes_vocab_size, self.config.EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
batched_contexts = self.compute_contexts(subtoken_vocab=subtoken_vocab, nodes_vocab=nodes_vocab,
source_input=path_source_indices, nodes_input=node_indices,
target_input=path_target_indices,
valid_mask=valid_mask,
path_source_lengths=path_source_lengths,
path_lengths=path_lengths, path_target_lengths=path_target_lengths,
is_evaluating=True)
outputs, final_states = self.decode_outputs(target_words_vocab=target_words_vocab,
target_input=target_index, batch_size=tf.shape(target_index)[0],
batched_contexts=batched_contexts, valid_mask=valid_mask,
is_evaluating=True)
if self.config.BEAM_WIDTH > 0:
predicted_indices = outputs.predicted_ids
topk_values = outputs.beam_search_decoder_output.scores
attention_weights = [tf.no_op()]
else:
predicted_indices = outputs.sample_id
topk_values = tf.constant(1, shape=(1, 1), dtype=tf.float32)
attention_weights = tf.squeeze(final_states.alignment_history.stack(), 1)
logits = outputs.rnn_output # (batch, ?, dim * 2 + rnn_size)
topk_candidates = tf.nn.top_k(logits, k=tf.minimum(self.topk, self.target_vocab_size))
topk_values = topk_candidates.values
topk_values = tf.nn.softmax(topk_values)
        paddings = [[0, 0], [0, self.config.MAX_TARGET_PARTS + 1 - tf.shape(logits)[1]], [0, 0]]
logits_pad = tf.pad(logits, paddings, 'CONSTANT', constant_values=0) # (batch, max_output_length, dim * 2 + rnn_size)
crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_index, logits=logits_pad)
target_words_nonzero = tf.sequence_mask(target_lengths + 1, maxlen=self.config.MAX_TARGET_PARTS + 1, dtype=tf.float32)
m_batch_size = tf.shape(target_index)[0]
losses = tf.reduce_sum(crossent * target_words_nonzero) / tf.to_float(m_batch_size)
return predicted_indices, topk_values, target_index, attention_weights, losses
@staticmethod
def get_attention_per_path(source_strings, path_strings, target_strings, attention_weights):
# attention_weights: (time, contexts)
results = []
for time_step in attention_weights:
attention_per_context = {}
for source, path, target, weight in zip(source_strings, path_strings, target_strings, time_step):
string_triplet = (
Common.binary_to_string(source), Common.binary_to_string(path), Common.binary_to_string(target))
attention_per_context[string_triplet] = weight
results.append(attention_per_context)
return results
def load_model(self, sess):
        if sess is not None:
self.saver.restore(sess, self.config.LOAD_PATH)
print('Done loading model')
with open(self.config.LOAD_PATH + '.dict', 'rb') as file:
if self.subtoken_to_index is not None:
return
print('Loading dictionaries from: ' + self.config.LOAD_PATH)
self.subtoken_to_index = pickle.load(file)
self.index_to_subtoken = pickle.load(file)
self.subtoken_vocab_size = pickle.load(file)
self.target_to_index = pickle.load(file)
self.index_to_target = pickle.load(file)
self.target_vocab_size = pickle.load(file)
self.node_to_index = pickle.load(file)
self.index_to_node = pickle.load(file)
self.nodes_vocab_size = pickle.load(file)
self.num_training_examples = pickle.load(file)
self.epochs_trained = pickle.load(file)
saved_config = pickle.load(file)
self.config.take_model_hyperparams_from(saved_config)
print('Done loading dictionaries')
@staticmethod
def initialize_session_variables(sess):
sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer()))
def get_should_reuse_variables(self):
if self.config.TRAIN_PATH:
return True
else:
return None
| 21,800 | 56.827586 | 142 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2vec/dd_code2vec.py
|
from config import Config
from dd_tensorflow_model import Code2VecModel
import DD
import sm_helper as hp
###############################################################
g_model = None
g_original_method_name = None
g_predicted_method_name = None
g_all_data = []
g_cnt_dict = {}
g_cnt_pass = [0, 0, 0]
###############################################################
def deltas_to_code(d):
s = "".join([c[1] for c in d])
return s
class MyDD(DD.DD):
def __init__(self):
DD.DD.__init__(self)
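    # ddmin oracle for code2vec: FAIL iff the reduced code still parses and
    # the model still predicts the original method name.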
def _test(self, deltas):
if not deltas:
return self.PASS
try:
g_cnt_pass[0] = g_cnt_pass[0] + 1
code = deltas_to_code(deltas)
hp.store_method(hp.g_simp_file, code)
if hp.is_parsable(code, g_original_method_name, hp.g_simp_file):
g_cnt_pass[1] = g_cnt_pass[1] + 1
predict, score, loss = hp.prediction_with_c2v(g_model, hp.g_root_path, hp.g_simp_file)
time = hp.get_current_time()
print('time = {}, predict = {}, score = {}, loss = {}'.format(time, predict, score, loss))
if predict == g_predicted_method_name:
g_cnt_pass[2] = g_cnt_pass[2] + 1
j_data = hp.get_json_data(time, score, loss, code, deltas[:], g_cnt_pass)
g_all_data.append("{}".format(j_data))
return self.FAIL
except:
pass
return self.PASS
if __name__ == '__main__':
config = Config(set_defaults=True, load_from_args=True, verify=True)
g_model = Code2VecModel(config)
print('Done creating code2vec model')
assert g_model is not None
# read file
file_list = hp.get_file_list()
for idx, java_file in enumerate(file_list):
print("\nStart [{}]: {}\n".format(idx + 1, java_file))
g_all_data.clear()
g_cnt_pass = [0, 0, 0]
try:
# method_name and method_body
g_all_data.append("\npath = {}".format(java_file))
method_name, method_body = hp.load_method(java_file)
assert (len(method_name) > 0) and (len(method_body) > 0)
g_cnt_dict[method_name] = g_cnt_dict.get(method_name, 0) + 1
g_all_data.append("method_name = {}".format(method_name))
g_all_data.append("method_body = {}".format(method_body))
hp.store_method(hp.g_simp_file, method_body)
# set predicted method_name as global method_name
g_original_method_name = method_name
predict, score, loss = hp.prediction_with_c2v(g_model, hp.g_root_path, hp.g_simp_file)
g_predicted_method_name = predict
g_all_data.append("predict, score, loss = {}, {}, {}".format(predict, score, loss))
assert g_original_method_name == g_predicted_method_name
# create deltas by char/token
deltas = []
if hp.g_deltas_type == "token":
deltas = hp.get_token_deltas(method_body)
else:
deltas = hp.get_char_deltas(method_body)
try:
# run ddmin
mydd = MyDD()
print("Simplifying failure-inducing input...")
g_all_data.append("\nTrace of simplified code(s):")
c = mydd.ddmin(deltas) # Invoke DDMIN
print("The 1-minimal failure-inducing input is", c)
print("Removing any element will make the failure go away.")
javaCode = deltas_to_code(c)
g_all_data.append("\nMinimal simplified code:\n{}".format(javaCode))
except Exception as e:
g_all_data.append("\nException:\n{}".format(str(e)))
# print/save all simplified code
save_name = "L{}_{}_{}.txt".format(str(idx + 1), method_name, g_cnt_dict[method_name])
output_file = "dd_data/{}".format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone [{}]: {}\n".format(idx + 1, java_file))
except:
print("\nError [{}]: {}\n".format(idx + 1, java_file))
g_model.close_session()
| 4,192 | 36.774775 | 106 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2vec/sm_helper.py
|
import json
import re
import subprocess
from datetime import datetime
import javalang
import pandas as pd
###############################################################
# TODO: all (if)
g_root_path = "/scratch/rabin/deployment/root-simplify/sm-code2vec"
g_c2v_model = "/scratch/rabin/models/code2vec/main/java-large/saved_model_iter3"
g_files_root = "/scratch/rabin/deployment/root-simplify/data_selection"
g_test_files = {
"correct_samefile": g_files_root + "/mn_c2x/c2x_jl_test_correct_prediction_samefile.txt"
}
g_test_loc = "correct_samefile"
g_test_file = g_test_files[g_test_loc]
g_c2v_test = g_root_path + "/data/sm/sm.test.c2v"
g_db_name = "sm"
JAR_LOAD_JAVA_METHOD = g_files_root + "/LoadJavaMethod.jar"
JAR_C2V_JAVA_EXTRACTOR = g_root_path + "/JavaExtractor/JPredict/target/JavaExtractor-0.0.1-SNAPSHOT.jar"
###############################################################
# TODO: DD (if)
g_deltas_types = ["token", "char"]
g_deltas_type = g_deltas_types[0]
g_simp_file = "data/tmp/sm_test.java"
###############################################################
# TODO: attn (if)
g_topk_attn = 200 # topk attentions
###############################################################
def get_file_list():
file_list = []
try:
df = pd.read_csv(g_test_file)
file_list = df["path"].tolist()[:1000]
except:
pass
return file_list
def get_subtoken_name(name):
name = '|'.join(re.sub(r"([A-Z])", r" \1", name).split())
return name.lower()
def get_flat_name(name):
name = name.split('|')
if len(name) < 2:
return ''.join(name)
else:
return name[0] + ''.join([x.capitalize() for x in name[1:]])
def fix_internal_delimiter(name): # a|bb|ccc
name = name.split('|')
name = [name[0]] + [n.title() for n in name[1:]]
return "".join(name) # aBbCcc
def get_current_time():
return str(datetime.now())
def is_parsable(src, original_method_name, java_file):
try:
tree = javalang.parse.parse("class Test { " + src + " }")
assert tree is not None
method_name, _ = load_method(java_file)
assert method_name == original_method_name
except:
return False
return True
def load_method(java_file):
try:
cmd = ['java', '-jar', JAR_LOAD_JAVA_METHOD, java_file]
contents = subprocess.check_output(cmd, encoding="utf-8", close_fds=True)
contents = contents.split()
method_name = contents[0]
method_body = " ".join(contents[1:])
return method_name, method_body
except:
return "", ""
def store_method(sm_file, method_body):
with open(sm_file, "w") as f:
f.write(method_body + "\n")
def prediction_with_c2v(model, root_path, java_file):
try:
# preprocess data
open(g_c2v_test, 'w').close()
cmd = ['/bin/sh', g_root_path + '/preprocess_test.sh',
root_path, java_file, g_db_name] # source --> /bin/sh
subprocess.call(cmd, close_fds=True)
c2v_after = open(g_c2v_test, 'r').read()
assert len(c2v_after.strip()) > 0
# evaluate model
result = model.evaluate()
assert result is not None and len(result) > 0
pred, score, loss = result.split(",")
return get_flat_name(pred), float(score), float(loss)
    except:
        return "", 0.0, 0.0  # keep a 3-tuple so callers can unpack on failure
def save_simplified_code(all_methods, output_file):
open(output_file, 'w').close()
with open(output_file, 'a') as f:
for jCode in all_methods:
print(jCode)
f.write(jCode + "\n")
f.write("\n")
def get_json_data(time, score, loss, code, tokens=None, n_pass=None):
score, loss = str(round(float(score), 4)), str(round(float(loss), 4))
data = {'time': time, 'score': score, 'loss': loss, 'code': code}
if tokens:
data['n_tokens'] = len(tokens)
if n_pass:
data['n_pass'] = n_pass
j_data = json.dumps(data)
return j_data
###############################################################
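# Delta constructors for ddmin, same scheme as the code2seq helper:
# a delta is an (index, fragment) pair.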
def get_char_deltas(program):
    data = list(program)  # e.g. ['a', ..., 'z']
    deltas = list(zip(range(len(data)), data))  # [(0, 'a'), ..., (n, 'z')]
return deltas
def get_token_deltas(program):
token, tokens = "", []
for c in program:
if not c.isalpha():
tokens.append(token)
tokens.append(c)
token = ""
else:
token = token + c
tokens.append(token)
tokens = [token for token in tokens if len(token) != 0]
deltas = list(zip(range(len(tokens)), tokens))
return deltas
def get_substr_deltas(program, n):
substrs = [program[i: i + n] for i in range(0, len(program), n)]
deltas = list(zip(range(len(substrs)), substrs))
return deltas
###############################################################
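# Re-extract path-contexts with --no_hash so paths appear as readable AST
# node sequences instead of hashed ids; needed to interpret the attention.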
def get_nohash_path_context(java_file):
try:
cmd = ['java', '-cp', JAR_C2V_JAVA_EXTRACTOR, 'JavaExtractor.App',
'--max_path_length', '8', '--max_path_width', '2', '--num_threads', '64',
'--no_hash', '--file', java_file]
content = subprocess.check_output(cmd, encoding="utf-8", close_fds=True)
return content
except:
return ""
def get_attention(model, root_path, java_file):
try:
# preprocess data
open(g_c2v_test, 'w').close()
cmd = ['/bin/sh', 'preprocess_test.sh',
root_path, java_file, g_db_name] # source --> /bin/sh
subprocess.call(cmd, close_fds=True)
c2v_hash = open(g_c2v_test, 'r').read()
assert len(c2v_hash.strip()) > 0
# ast path instead of hash value [--no_hash]
c2v_nohash = get_nohash_path_context(java_file)
assert len(c2v_nohash.strip()) > 0
# set topk for attention
ast_paths = c2v_nohash.strip().split()[1:]
l_topk_attn = len(ast_paths) if len(ast_paths) < g_topk_attn else g_topk_attn
# get topk path from attention
attentions = model.get_attention()
assert attentions is not None
attentions = attentions[:l_topk_attn]
topk_idx = sorted(range(len(attentions)), key=attentions.__getitem__, reverse=True)[:l_topk_attn]
topk_path = [ast_paths[i] for i in topk_idx]
topk_attn = [attentions[i][0] for i in topk_idx]
return topk_attn, topk_path
except:
return ""
| 6,378 | 28.396313 | 105 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2vec/attn_code2vec.py
|
from config import Config
from dd_tensorflow_model import Code2VecModel
import sm_helper as hp
###############################################################
g_model = None
g_all_data = []
g_cnt_dict = {}
###############################################################
if __name__ == '__main__':
config = Config(set_defaults=True, load_from_args=True, verify=True)
g_model = Code2VecModel(config)
print('Done creating code2vec model')
assert g_model is not None
# read file
file_list = hp.get_file_list()
for idx, java_file in enumerate(file_list):
print("\nStart [{}]: {}\n".format(idx + 1, java_file))
g_all_data.clear()
try:
# method_name and method_body
g_all_data.append("\npath = {}".format(java_file))
method_name, method_body = hp.load_method(java_file)
assert (len(method_name) > 0) and (len(method_body) > 0)
g_cnt_dict[method_name] = g_cnt_dict.get(method_name, 0) + 1
g_all_data.append("method_name = {}".format(method_name))
g_all_data.append("method_body = {}".format(method_body))
hp.store_method(hp.g_simp_file, method_body)
# check if prediction is correct
predict, _, _ = hp.prediction_with_c2v(g_model, hp.g_root_path, hp.g_simp_file)
assert method_name == predict
# get path-context and attention
topk_attn, topk_path = hp.get_attention(g_model, hp.g_root_path, hp.g_simp_file)
topk_terminal = []
g_all_data.append("\ntopk path-contexts:")
for i in range(len(topk_path)):
path_context = topk_path[i].strip().split(',')
topk_terminal.append([path_context[0], path_context[-1]])
g_all_data.append("[{}] {}".format(f"{topk_attn[i]:.4f}", topk_path[i]))
g_all_data.append("\ntopk terminals:\n{}".format(topk_terminal))
# print/save path-context and attention
save_name = "L{}_{}_{}.txt".format(str(idx + 1), method_name, g_cnt_dict[method_name])
output_file = "attn_data/{}".format(save_name)
hp.save_simplified_code(g_all_data, output_file)
print("\nDone [{}]: {}\n".format(idx + 1, java_file))
except:
print("\nError [{}]: {}\n".format(idx + 1, java_file))
g_model.close_session()
| 2,398 | 40.362069 | 98 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2vec/dd_tensorflow_model.py
|
import tensorflow as tf
from typing import Dict, Optional
from path_context_reader import PathContextReader, ModelInputTensorsFormer, ReaderInputTensors, EstimatorAction
from common import common
from vocabularies import VocabType
from config import Config
from dd_model_base import Code2VecModelBase
tf.compat.v1.disable_eager_execution()
class Code2VecModel(Code2VecModelBase):
def __init__(self, config: Config):
self.sess = tf.compat.v1.Session()
self.saver = None
self.eval_reader = None
self.eval_input_iterator_reset_op = None
self.predict_reader = None
# self.eval_placeholder = None
self.predict_placeholder = None
self.eval_top_words_op, self.eval_top_values_op, self.eval_original_names_op, self.eval_code_vectors, \
self.eval_attentions_op, self.eval_losses_op = None, None, None, None, None, None
self.predict_top_words_op, self.predict_top_values_op, self.predict_original_names_op = None, None, None
self.vocab_type_to_tf_variable_name_mapping: Dict[VocabType, str] = {
VocabType.Token: 'WORDS_VOCAB',
VocabType.Target: 'TARGET_WORDS_VOCAB',
VocabType.Path: 'PATHS_VOCAB'
}
super(Code2VecModel, self).__init__(config)
        # moved from evaluate() into the constructor so the eval graph is built once
if self.eval_reader is None:
self.eval_reader = PathContextReader(vocabs=self.vocabs,
model_input_tensors_former=_TFEvaluateModelInputTensorsFormer(),
config=self.config, estimator_action=EstimatorAction.Evaluate)
input_iterator = tf.compat.v1.data.make_initializable_iterator(self.eval_reader.get_dataset())
self.eval_input_iterator_reset_op = input_iterator.initializer
input_tensors = input_iterator.get_next()
self.eval_top_words_op, self.eval_top_values_op, self.eval_original_names_op, _, _, _, \
self.eval_code_vectors, self.eval_attentions_op, self.eval_losses_op = \
self._build_tf_test_graph(input_tensors, normalize_scores=True)
if self.saver is None:
self.saver = tf.compat.v1.train.Saver()
if self.config.MODEL_LOAD_PATH and not self.config.TRAIN_DATA_PATH_PREFIX:
self._initialize_session_variables()
self._load_inner_model(self.sess)
def evaluate(self) -> Optional[str]:
self.sess.run(self.eval_input_iterator_reset_op)
# Run evaluation in a loop until iterator is exhausted.
# Each iteration = batch. We iterate as long as the tf iterator (reader) yields batches.
try:
while True:
top_words, top_scores, original_names, code_vectors, losses = self.sess.run(
[self.eval_top_words_op, self.eval_top_values_op,
self.eval_original_names_op, self.eval_code_vectors, self.eval_losses_op],
)
# shapes:
# top_words: (batch, top_k); top_scores: (batch, top_k)
# original_names: (batch, ); code_vectors: (batch, code_vector_size)
top_words = common.binary_to_string_matrix(top_words) # (batch, top_k)
original_names = common.binary_to_string_list(original_names) # (batch,)
return self._log_predictions_during_evaluation(zip(original_names, top_words, top_scores, losses))
except tf.errors.OutOfRangeError:
            pass  # reader iterator is exhausted and has no more batches to produce.
def get_attention(self) -> Optional[str]:
self.sess.run(self.eval_input_iterator_reset_op)
# Run evaluation in a loop until iterator is exhausted.
try:
while True:
attentions, losses = self.sess.run(
[self.eval_attentions_op, self.eval_losses_op],
)
for sub_attr in attentions:
return sub_attr # only first sub token
except tf.errors.OutOfRangeError:
            pass  # reader iterator is exhausted and has no more batches to produce.
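    # Core code2vec attention: embed each (source, path, target) triple, score
    # it against the learned attention vector, mask invalid contexts (log(0)
    # = -inf before the softmax), and sum into a single code vector.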
def _calculate_weighted_contexts(self, tokens_vocab, paths_vocab, attention_param, source_input, path_input,
target_input, valid_mask, is_evaluating=False):
source_word_embed = tf.nn.embedding_lookup(params=tokens_vocab, ids=source_input) # (batch, max_contexts, dim)
path_embed = tf.nn.embedding_lookup(params=paths_vocab, ids=path_input) # (batch, max_contexts, dim)
target_word_embed = tf.nn.embedding_lookup(params=tokens_vocab, ids=target_input) # (batch, max_contexts, dim)
context_embed = tf.concat([source_word_embed, path_embed, target_word_embed],
axis=-1) # (batch, max_contexts, dim * 3)
if not is_evaluating:
context_embed = tf.nn.dropout(context_embed, rate=1 - self.config.DROPOUT_KEEP_RATE)
flat_embed = tf.reshape(context_embed, [-1, self.config.context_vector_size]) # (batch * max_contexts, dim * 3)
transform_param = tf.compat.v1.get_variable(
'TRANSFORM', shape=(self.config.context_vector_size, self.config.CODE_VECTOR_SIZE), dtype=tf.float32)
flat_embed = tf.tanh(tf.matmul(flat_embed, transform_param)) # (batch * max_contexts, dim * 3)
contexts_weights = tf.matmul(flat_embed, attention_param) # (batch * max_contexts, 1)
batched_contexts_weights = tf.reshape(
contexts_weights, [-1, self.config.MAX_CONTEXTS, 1]) # (batch, max_contexts, 1)
mask = tf.math.log(valid_mask) # (batch, max_contexts)
mask = tf.expand_dims(mask, axis=2) # (batch, max_contexts, 1)
batched_contexts_weights += mask # (batch, max_contexts, 1)
attention_weights = tf.nn.softmax(batched_contexts_weights, axis=1) # (batch, max_contexts, 1)
batched_embed = tf.reshape(flat_embed, shape=[-1, self.config.MAX_CONTEXTS, self.config.CODE_VECTOR_SIZE])
code_vectors = tf.reduce_sum(tf.multiply(batched_embed, attention_weights), axis=1) # (batch, dim * 3)
return code_vectors, attention_weights
def _build_tf_test_graph(self, input_tensors, normalize_scores=False):
with tf.compat.v1.variable_scope('model', reuse=self.get_should_reuse_variables()):
tokens_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Token],
shape=(self.vocabs.token_vocab.size, self.config.TOKEN_EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
targets_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Target],
shape=(self.vocabs.target_vocab.size, self.config.TARGET_EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
attention_param = tf.compat.v1.get_variable(
'ATTENTION', shape=(self.config.context_vector_size, 1),
dtype=tf.float32, trainable=False)
paths_vocab = tf.compat.v1.get_variable(
self.vocab_type_to_tf_variable_name_mapping[VocabType.Path],
shape=(self.vocabs.path_vocab.size, self.config.PATH_EMBEDDINGS_SIZE),
dtype=tf.float32, trainable=False)
targets_vocab = tf.transpose(targets_vocab) # (dim * 3, target_word_vocab)
# Use `_TFEvaluateModelInputTensorsFormer` to access input tensors by name.
input_tensors = _TFEvaluateModelInputTensorsFormer().from_model_input_form(input_tensors)
# shape of (batch, 1) for input_tensors.target_string
# shape of (batch, max_contexts) for the other tensors
code_vectors, attention_weights = self._calculate_weighted_contexts(
tokens_vocab, paths_vocab, attention_param, input_tensors.path_source_token_indices,
input_tensors.path_indices, input_tensors.path_target_token_indices,
input_tensors.context_valid_mask, is_evaluating=True)
scores = tf.matmul(code_vectors, targets_vocab) # (batch, target_word_vocab)
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(input_tensors.target_index, [-1]),
logits=scores)
topk_candidates = tf.nn.top_k(scores, k=tf.minimum(
self.config.TOP_K_WORDS_CONSIDERED_DURING_PREDICTION, self.vocabs.target_vocab.size))
top_indices = topk_candidates.indices
top_words = self.vocabs.target_vocab.lookup_word(top_indices)
original_words = input_tensors.target_string
top_scores = topk_candidates.values
if normalize_scores:
top_scores = tf.nn.softmax(top_scores)
return top_words, top_scores, original_words, input_tensors.path_source_token_strings, \
input_tensors.path_strings, input_tensors.path_target_token_strings, \
code_vectors, attention_weights, losses
def _load_inner_model(self, sess=None):
if sess is not None:
self.log('Loading model weights from: ' + self.config.MODEL_LOAD_PATH)
self.saver.restore(sess, self.config.MODEL_LOAD_PATH)
self.log('Done loading model weights')
def get_should_reuse_variables(self):
if self.config.TRAIN_DATA_PATH_PREFIX:
return True
else:
return None
def _log_predictions_during_evaluation(self, results):
try:
for original_name, top_predicted_words, top_scores, losses in results:
return "{},{},{}".format(top_predicted_words[0], top_scores[0], losses) # dd for single item
except:
return ""
def close_session(self):
self.sess.close()
def _initialize_session_variables(self):
self.sess.run(tf.group(
tf.compat.v1.global_variables_initializer(),
tf.compat.v1.local_variables_initializer(),
tf.compat.v1.tables_initializer()))
        self.log('Initialized variables')
class _TFEvaluateModelInputTensorsFormer(ModelInputTensorsFormer):
def to_model_input_form(self, input_tensors: ReaderInputTensors):
return (input_tensors.target_string, input_tensors.target_index,
input_tensors.path_source_token_indices, input_tensors.path_indices,
input_tensors.path_target_token_indices, input_tensors.context_valid_mask,
input_tensors.path_source_token_strings, input_tensors.path_strings,
input_tensors.path_target_token_strings)
def from_model_input_form(self, input_row) -> ReaderInputTensors:
return ReaderInputTensors(
target_string=input_row[0],
target_index=input_row[1],
path_source_token_indices=input_row[2],
path_indices=input_row[3],
path_target_token_indices=input_row[4],
context_valid_mask=input_row[5],
path_source_token_strings=input_row[6],
path_strings=input_row[7],
path_target_token_strings=input_row[8]
)
| 11,224 | 50.255708 | 120 |
py
|
SIVAND
|
SIVAND-master/models/dd-code2vec/dd_model_base.py
|
import numpy as np
import abc
import os
from typing import Optional, Dict, Tuple, Iterable
from common import common
from vocabularies import Code2VecVocabs, VocabType
from config import Config
class Code2VecModelBase(abc.ABC):
def __init__(self, config: Config):
self.config = config
self.config.verify()
self._log_creating_model()
if not config.RELEASE:
self._init_num_of_examples()
self._log_model_configuration()
self.vocabs = Code2VecVocabs(config)
self.vocabs.target_vocab.get_index_to_word_lookup_table() # just to initialize it (if not already initialized)
self._load_or_create_inner_model()
self._initialize()
def _log_creating_model(self):
self.log('')
self.log('')
self.log('---------------------------------------------------------------------')
self.log('---------------------------------------------------------------------')
self.log('---------------------- Creating code2vec model ----------------------')
self.log('---------------------------------------------------------------------')
self.log('---------------------------------------------------------------------')
def _log_model_configuration(self):
self.log('---------------------------------------------------------------------')
self.log('----------------- Configuration - Hyper Parameters ------------------')
longest_param_name_len = max(len(param_name) for param_name, _ in self.config)
for param_name, param_val in self.config:
self.log('{name: <{name_len}}{val}'.format(
name=param_name, val=param_val, name_len=longest_param_name_len + 2))
self.log('---------------------------------------------------------------------')
@property
def logger(self):
return self.config.get_logger()
def log(self, msg):
self.logger.info(msg)
def _init_num_of_examples(self):
self.log('Checking number of examples ...')
if self.config.is_training:
self.config.NUM_TRAIN_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.train_data_path)
self.log(' Number of train examples: {}'.format(self.config.NUM_TRAIN_EXAMPLES))
if self.config.is_testing:
self.config.NUM_TEST_EXAMPLES = self._get_num_of_examples_for_dataset(self.config.TEST_DATA_PATH)
self.log(' Number of test examples: {}'.format(self.config.NUM_TEST_EXAMPLES))
@staticmethod
def _get_num_of_examples_for_dataset(dataset_path: str) -> int:
dataset_num_examples_file_path = dataset_path + '.num_examples'
if os.path.isfile(dataset_num_examples_file_path):
with open(dataset_num_examples_file_path, 'r') as file:
num_examples_in_dataset = int(file.readline())
else:
num_examples_in_dataset = common.count_lines_in_file(dataset_path)
with open(dataset_num_examples_file_path, 'w') as file:
file.write(str(num_examples_in_dataset))
return num_examples_in_dataset
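    # Illustrative caching behavior of _get_num_of_examples_for_dataset
    # (paths are hypothetical): the first call on 'data/train.c2v' counts its
    # lines and writes the total to 'data/train.c2v.num_examples'; later calls
    # read that cached integer instead of re-scanning the large dataset file.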
def load_or_build(self):
self.vocabs = Code2VecVocabs(self.config)
self._load_or_create_inner_model()
def save(self, model_save_path=None):
if model_save_path is None:
model_save_path = self.config.MODEL_SAVE_PATH
model_save_dir = '/'.join(model_save_path.split('/')[:-1])
if not os.path.isdir(model_save_dir):
os.makedirs(model_save_dir, exist_ok=True)
self.vocabs.save(self.config.get_vocabularies_path_from_model_path(model_save_path))
self._save_inner_model(model_save_path)
def _write_code_vectors(self, file, code_vectors):
for vec in code_vectors:
file.write(' '.join(map(str, vec)) + '\n')
def _get_attention_weight_per_context(
self, path_source_strings: Iterable[str], path_strings: Iterable[str], path_target_strings: Iterable[str],
attention_weights: Iterable[float]) -> Dict[Tuple[str, str, str], float]:
attention_weights = np.squeeze(attention_weights, axis=-1) # (max_contexts, )
attention_per_context: Dict[Tuple[str, str, str], float] = {}
# shape of path_source_strings, path_strings, path_target_strings, attention_weights is (max_contexts, )
# iterate over contexts
for path_source, path, path_target, weight in \
zip(path_source_strings, path_strings, path_target_strings, attention_weights):
string_context_triplet = (common.binary_to_string(path_source),
common.binary_to_string(path),
common.binary_to_string(path_target))
attention_per_context[string_context_triplet] = weight
return attention_per_context
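    # Example of the returned mapping (values illustrative): one attention
    # weight per context, keyed by the decoded string triplet, e.g.
    #   {('source_token', 'path', 'target_token'): 0.73, ...}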
def close_session(self):
# can be overridden by the implementation model class.
# default implementation just does nothing.
pass
@abc.abstractmethod
def evaluate(self) -> Optional[str]:
...
def _load_or_create_inner_model(self):
if self.config.is_loading:
self._load_inner_model()
else:
self._create_inner_model()
@abc.abstractmethod
def _load_inner_model(self):
...
def _create_inner_model(self):
# can be overridden by the implementation model class.
# default implementation just does nothing.
pass
def _initialize(self):
# can be overridden by the implementation model class.
# default implementation just does nothing.
pass
| 5,679 | 41.706767 | 119 |
py
|
Higgs-ML
|
Higgs-ML-master/cnn.py
|
from __future__ import print_function
import os, sys
import math
import pandas as pd
import numpy as np
import keras
from keras.models import load_model
from keras import backend as K
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.model_selection import train_test_split
from sklearn import metrics
from func.figure import LossHistory, ROC_plot, deltaKg_plot
from func.models import our_model
from func.file import removedir
try:
import tkinter
except:
import Tkinter as tkinter
########################################################## load data ##########################################################
dirs=['npydata','model','plot']
for d in dirs:
if os.path.exists(d):
removedir(d)
os.makedirs(d)
img_rows,img_cols =34,66
data1= pd.read_table('data/train.txt', header=None, sep=',')
data2= pd.read_table('data/test.txt', header=None, sep=',')
Train_number = len(data1)
test_number = len(data2)
total_number = len(data1)+len(data2)
print ('total_number:', total_number)
print ('test_number:', test_number)
print ('Train_number:', Train_number)
A1 = data1.values
B1 = data2.values
np.random.shuffle(A1)
np.random.shuffle(B1)
A2 = A1[:,2:img_rows*img_cols+2]
B2 = B1[:,2:img_rows*img_cols+2]
#A2_sum=np.sum(A2, axis = 1)
#A2 = A2.T
#A2 /= (A2_sum+10e-8)
#A2 = A2.T
#A2 -= np.mean(A2, axis = 0)
#A2 /= (np.std(A2, axis = 0)+10e-5)
#B2_sum=np.sum(B2, axis = 1)
#B2 = B2.T
#B2 /= (B2_sum+10e-8)
#B2 = B2.T
#B2 -= np.mean(B2, axis = 0)
#B2 /= (np.std(B2, axis = 0)+10e-5)
Train_image = A2.reshape(Train_number,img_rows,img_cols,1)
Train_label = A1[:,1:2]
Train_weight = A1[:,0:1]
test_image = B2.reshape(test_number,img_rows,img_cols,1)
test_label = B1[:,1:2]
test_weight = B1[:,0:1]
#np.save('npydata/Train_image',Train_image)
#np.save('npydata/Train_label',Train_label)
#np.save('npydata/Train_weight',Train_weight)
#np.save('npydata/test_image',test_image)
#np.save('npydata/test_label',test_label)
#np.save('npydata/test_weight',test_weight)
X_train, X_valid, y_train, y_valid = train_test_split(Train_image,Train_label,test_size=0.1,random_state=22)
print ('train shape:', X_train.shape)
print ('valid shape:', X_valid.shape)
x_train = X_train.astype('float32')
x_valid = X_valid.astype('float32')
############################################## train ######################################################################################################
model=our_model(img_rows,img_cols)
history = LossHistory()
early_stopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
saveBestModel = ModelCheckpoint(filepath='model/best.h5', monitor='val_loss', verbose=0, save_best_only=True, mode='min')
model.fit(x_train, y_train, batch_size=128, epochs=100, verbose=1, validation_data=(x_valid, y_valid),callbacks=[early_stopping, saveBestModel, history])
model.save('model/final.h5')
############################################# evaluate ####################################################################################################
TestPrediction = model.predict_proba(test_image)
fpr, tpr, thresh = metrics.roc_curve(test_label, TestPrediction, pos_label=None, sample_weight=test_weight, drop_intermediate=True)
auc = metrics.auc(fpr, tpr, reorder=True)
print ('AUC :',auc)
Ng, NB=4090, 21141
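# delta_kg below is, as far as the surrounding code suggests, the relative
# statistical uncertainty on the signal coupling:
#   delta_kg = sqrt(Ng*tpr + NB*fpr) / (2 * Ng * tpr)
# with Ng/NB the expected signal/background yields given above; smaller is
# better, so the loop scans the ROC curve for the working point minimising it.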
delta_kg=[]
for i in range(len(tpr)):
if tpr[i]==0:
delta_kg.append(1000)
else:
delta_kg.append(math.sqrt(Ng*tpr[i]+NB*fpr[i])/(2.0*Ng*tpr[i]))
best=min(delta_kg)
min_index=delta_kg.index(best)
print ('best point: (tpr, fpr) = (',tpr[min_index],',',fpr[min_index],')')
print ('minimal delta_kg =',best)
history.loss_plot('epoch')
ROC_plot(tpr, fpr)
deltaKg_plot(tpr, delta_kg)
| 3,700 | 27.689922 | 155 |
py
|
Higgs-ML
|
Higgs-ML-master/func/file.py
|
from __future__ import print_function
import os, sys
import shutil
def removedir(folder):
filelist=[]
rootdir=folder
filelist=os.listdir(rootdir)
for f in filelist:
filepath = os.path.join(rootdir,f)
if os.path.isfile(filepath):
os.remove(filepath)
print(str(filepath)+' removed!')
elif os.path.isdir(filepath):
shutil.rmtree(filepath,True)
print('dir '+str(filepath)+' removed!')
shutil.rmtree(rootdir,True)
print('dir '+folder+' removed!')
| 596 | 28.85 | 49 |
py
|
Higgs-ML
|
Higgs-ML-master/func/figure.py
|
from __future__ import print_function
import keras
import numpy as np
import matplotlib.pyplot as plt
import math
class LossHistory(keras.callbacks.Callback):
    def on_train_begin(self, logs={}):
self.losses = {'batch':[], 'epoch':[]}
self.accuracy = {'batch':[], 'epoch':[]}
self.val_loss = {'batch':[], 'epoch':[]}
self.val_acc = {'batch':[], 'epoch':[]}
def on_batch_end(self, batch, logs={}):
self.losses['batch'].append(logs.get('loss'))
self.accuracy['batch'].append(logs.get('acc'))
self.val_loss['batch'].append(logs.get('val_loss'))
self.val_acc['batch'].append(logs.get('val_acc'))
def on_epoch_end(self, batch, logs={}):
self.losses['epoch'].append(logs.get('loss'))
self.accuracy['epoch'].append(logs.get('acc'))
self.val_loss['epoch'].append(logs.get('val_loss'))
self.val_acc['epoch'].append(logs.get('val_acc'))
#val_predict=(np.asarray(self.model.predict(self.validation_data[0]))).round()
#val_targ=self.validation_data[1]
#out=np.hstack((val_targ, val_predict))
#TP,TN,FP,FN=0,0,0,0
#for i in range(len(out)):
#print (round(float(out[order-1:order,0]),8), round(float(out[order-1:order,1]),8))
# if out[i,0]==1 and out[i,1]==1:
# TP+=1
# if out[i,0]==0 and out[i,1]==0:
# TN+=1
# if out[i,0]==0 and out[i,1]==1:
# FP+=1
# if out[i,0]==1 and out[i,1]==0:
# FN+=1
#if TP!=0 and TN!=0 and FP!=0 and FN!=0:
# precision=TP/float(TP+FP);
# recall=TP/float(TP+FN);
# f1=2*precision*recall/(precision+recall);
# print("TP:",TP," TN:",TN," FP:",FP," FN:",FN," tpr:",TP/float(TP+FN)," fpr:",FP/float(FP+TN)," precision:",precision," recall:",recall," f1:",f1)
def loss_plot(self, loss_type):
iters = range(len(self.losses[loss_type]))
plt.figure()
plt.plot(iters, self.accuracy[loss_type], 'r', label='train acc', lw=2.0)
plt.plot(iters, self.losses[loss_type], 'g', label='train loss', lw=2.0)
if loss_type == 'epoch':
plt.plot(iters, self.val_acc[loss_type], 'b', label='val acc', lw=2.0)
plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss', lw=2.0)
plt.grid(True)
plt.xlabel(loss_type, fontsize=16)
plt.ylabel('acc-loss', fontsize=16)
plt.legend(loc="upper right")
plt.savefig('plot/loss_acc.png')
#plt.show()
def ROC_plot(tpr, fpr):
plt.figure(figsize=(8.5,3.7))
plt.subplot(1,2,1)
plt.plot(tpr,1-fpr)
plt.xlabel('Signal Efficiency',fontsize=16)
plt.ylabel('Background Rejection',fontsize=16)
plt.xlim(0,1)
plt.ylim(0,1)
plt.title('ROC',fontsize=16)
plt.savefig('plot/ROC.png')
#plt.show()
return
def deltaKg_plot(tpr, delta_kg):
plt.figure(figsize=(8.5,3.7))
plt.subplot(1,2,1)
plt.plot(tpr,delta_kg,lw=2.0)
plt.xlabel('True Positive Rate',fontsize=16)
plt.ylabel('delta_kg',fontsize=16)
plt.xlim(0.4,0.9)
plt.ylim(0.01,0.02)
plt.title('Uncertainty',fontsize=16)
plt.savefig('plot/delta_Kg.png')
#plt.show()
return
| 3,107 | 32.782609 | 158 |
py
|
Higgs-ML
|
Higgs-ML-master/func/models.py
|
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Activation, Flatten
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import SGD, Adam, Nadam
def our_model(img_rows,img_cols):
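    # VGG-style CNN: conv blocks of 64, 64x2, 128x3 and 128x3 filters, each
    # followed by 2x2 max-pooling and dropout, then a 128-unit dense layer and
    # a sigmoid output for binary signal/background classification.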
model=Sequential()
model.add(Conv2D(64,(3,3),padding='valid',kernel_initializer="uniform",input_shape=(img_rows,img_cols,1)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2))
model.add(Dropout(0.25))
model.add(Conv2D(64,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(Conv2D(64,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2))
model.add(Dropout(0.5))
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2))
model.add(Dropout(0.5))
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(Conv2D(128,(3,3),padding='same',kernel_initializer="uniform"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
adam = Adam(lr=0.0005, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss='binary_crossentropy',optimizer = adam, metrics=['accuracy'])
return model
| 1,906 | 36.392157 | 107 |
py
|
Higgs-ML
|
Higgs-ML-master/func/__init__.py
| 0 | 0 | 0 |
py
|
|
hyperas
|
hyperas-master/setup.py
|
from setuptools import setup
from setuptools import find_packages
setup(name='hyperas',
version='0.4.1',
description='Simple wrapper for hyperopt to do convenient hyperparameter optimization for Keras models',
url='http://github.com/maxpumperla/hyperas',
download_url='https://github.com/maxpumperla/hyperas/tarball/0.4.1',
author='Max Pumperla',
author_email='[email protected]',
install_requires=['keras', 'hyperopt', 'entrypoints', 'jupyter', 'nbformat', 'nbconvert'],
license='MIT',
packages=find_packages(),
zip_safe=False)
| 600 | 39.066667 | 110 |
py
|
hyperas
|
hyperas-master/hyperas/distributions.py
|
from hyperopt.hp import choice
from hyperopt.hp import randint
from hyperopt.hp import pchoice
from hyperopt.hp import uniform
from hyperopt.hp import quniform
from hyperopt.hp import loguniform
from hyperopt.hp import qloguniform
from hyperopt.hp import normal
from hyperopt.hp import qnormal
from hyperopt.hp import lognormal
from hyperopt.hp import qlognormal
from hyperopt.hp import uniformint
| 397 | 32.166667 | 35 |
py
|
hyperas
|
hyperas-master/hyperas/optim.py
|
import inspect
import os
import re
import sys
import nbformat
import numpy as np
from hyperopt import fmin
from nbconvert import PythonExporter
from .ensemble import VotingModel
from .utils import (
remove_imports, remove_all_comments, extract_imports, temp_string,
write_temp_files, determine_indent, with_line_numbers, unpack_hyperopt_vals,
eval_hyperopt_space, find_signature_end)
sys.path.append(".")
def minimize(model,
data,
algo,
max_evals,
trials,
functions=None,
rseed=1337,
notebook_name=None,
verbose=True,
eval_space=False,
return_space=False,
keep_temp=False,
data_args=None):
"""
Minimize a keras model for given data and implicit hyperparameters.
Parameters
----------
model: A function defining a keras model with hyperas templates, which returns a
valid hyperopt results dictionary, e.g.
return {'loss': -acc, 'status': STATUS_OK}
    data: A parameter-less function that defines and returns all data needed in the above
model definition.
algo: A hyperopt algorithm, like tpe.suggest or rand.suggest
max_evals: Maximum number of optimization runs
trials: A hyperopt trials object, used to store intermediate results for all
        optimization runs
    functions: Optional list of helper functions referenced by the model function; their
        source code is copied into the generated temp_model.py
    rseed: Integer random seed for experiments
notebook_name: If running from an ipython notebook, provide filename (not path)
verbose: Print verbose output
eval_space: Evaluate the best run in the search space such that 'choice's contain actually meaningful values instead of mere indices
return_space: Return the hyperopt search space object (e.g. for further processing) as last return value
keep_temp: Keep temp_model.py file on the filesystem
data_args: Arguments to be passed to data function
Returns
-------
If `return_space` is False: A pair consisting of the results dictionary of the best run and the corresponding
keras model.
If `return_space` is True: The pair of best result and corresponding keras model, and the hyperopt search space
"""
best_run, space = base_minimizer(model=model,
data=data,
functions=functions,
algo=algo,
max_evals=max_evals,
trials=trials,
rseed=rseed,
full_model_string=None,
notebook_name=notebook_name,
verbose=verbose,
keep_temp=keep_temp,
data_args=data_args)
best_model = None
for trial in trials:
vals = trial.get('misc').get('vals')
# unpack the values from lists without overwriting the mutable dict within 'trial'
unpacked_vals = unpack_hyperopt_vals(vals)
# identify the best_run (comes with unpacked values from the hyperopt function `base.Trials.argmin`)
if unpacked_vals == best_run and 'model' in trial.get('result').keys():
best_model = trial.get('result').get('model')
if eval_space is True:
# evaluate the search space
best_run = eval_hyperopt_space(space, best_run)
if return_space is True:
# return the space as well
return best_run, best_model, space
else:
# the default case for backwards compatibility with expanded return arguments
return best_run, best_model
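# Minimal usage sketch (``my_model``/``my_data`` are hypothetical; the
# examples/ directory contains complete runnable scripts):
#
#     from hyperopt import Trials, tpe
#     best_run, best_model = minimize(model=my_model, data=my_data,
#                                     algo=tpe.suggest, max_evals=5,
#                                     trials=Trials())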
def base_minimizer(model, data, functions, algo, max_evals, trials,
rseed=1337, full_model_string=None, notebook_name=None,
verbose=True, stack=3, keep_temp=False, data_args=None):
if full_model_string is not None:
model_str = full_model_string
else:
model_str = get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args=data_args)
temp_file = './temp_model.py'
write_temp_files(model_str, temp_file)
if 'temp_model' in sys.modules:
del sys.modules["temp_model"]
try:
from temp_model import keras_fmin_fnct, get_space
except:
print("Unexpected error: {}".format(sys.exc_info()[0]))
raise
try:
if not keep_temp:
os.remove(temp_file)
os.remove(temp_file + 'c')
except OSError:
pass
try:
# for backward compatibility.
return (
fmin(keras_fmin_fnct,
space=get_space(),
algo=algo,
max_evals=max_evals,
trials=trials,
rseed=rseed,
return_argmin=True),
get_space()
)
except TypeError:
pass
return (
fmin(keras_fmin_fnct,
space=get_space(),
algo=algo,
max_evals=max_evals,
trials=trials,
#rstate=np.random.RandomState(rseed),
rstate=np.random.default_rng(rseed),
return_argmin=True),
get_space()
)
def best_ensemble(nb_ensemble_models, model, data, algo, max_evals,
trials, voting='hard', weights=None, nb_classes=None, functions=None):
model_list = best_models(nb_models=nb_ensemble_models,
model=model,
data=data,
algo=algo,
max_evals=max_evals,
trials=trials,
functions=functions)
return VotingModel(model_list, voting, weights, nb_classes)
def best_models(nb_models, model, data, algo, max_evals, trials, functions=None, keep_temp=False):
base_minimizer(model=model,
data=data,
functions=functions,
algo=algo,
max_evals=max_evals,
trials=trials,
stack=4,
keep_temp=keep_temp)
if len(trials) < nb_models:
nb_models = len(trials)
scores = [trial.get('result').get('loss') for trial in trials]
cut_off = sorted(scores, reverse=True)[nb_models - 1]
model_list = [trial.get('result').get('model') for trial in trials if trial.get('result').get('loss') >= cut_off]
return model_list
def get_hyperopt_model_string(model, data, functions, notebook_name, verbose, stack, data_args):
model_string = inspect.getsource(model)
model_string = remove_imports(model_string)
if notebook_name:
notebook_path = os.getcwd() + "/{}.ipynb".format(notebook_name)
with open(notebook_path, 'r') as f:
notebook = nbformat.reads(f.read(), nbformat.NO_CONVERT)
exporter = PythonExporter()
source, _ = exporter.from_notebook_node(notebook)
else:
calling_script_file = os.path.abspath(inspect.stack()[stack][1])
with open(calling_script_file, 'r') as f:
source = f.read()
cleaned_source = remove_all_comments(source)
imports = extract_imports(cleaned_source, verbose)
parts = hyperparameter_names(model_string)
aug_parts = augmented_names(parts)
hyperopt_params = get_hyperparameters(model_string)
space = get_hyperopt_space(parts, hyperopt_params, verbose)
functions_string = retrieve_function_string(functions, verbose)
data_string = retrieve_data_string(data, verbose, data_args)
model = hyperopt_keras_model(model_string, parts, aug_parts, verbose)
temp_str = temp_string(imports, model, data_string, functions_string, space)
return temp_str
def get_hyperopt_space(parts, hyperopt_params, verbose=True):
space = "def get_space():\n return {\n"
for name, param in zip(parts, hyperopt_params):
param = re.sub(r"\(", "('" + name + "', ", param, 1)
space += " '" + name + "': hp." + param + ",\n"
space = space[:-1]
space += "\n }\n"
if verbose:
print('>>> Hyperas search space:\n')
print(space)
return space
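# For parts=['Dropout'] and hyperopt_params=['uniform(0, 1)'] the string built
# above looks like (illustrative):
#
#     def get_space():
#         return {
#             'Dropout': hp.uniform('Dropout', 0, 1),
#         }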
def retrieve_data_string(data, verbose=True, data_args=None):
data_string = inspect.getsource(data)
first_line = data_string.split("\n")[0]
indent_length = len(determine_indent(data_string))
data_string = data_string.replace(first_line, "")
r = re.compile(r'^\s*return.*')
last_line = [s for s in reversed(data_string.split("\n")) if r.match(s)][0]
data_string = data_string.replace(last_line, "")
required_arguments = inspect.getfullargspec(data).args
if required_arguments:
if data_args is None:
raise ValueError(
"Data function takes arguments {} but no values are passed via data_args".format(required_arguments))
data_string = "\n".join(" {} = {}".format(x, repr(y)) for x, y in zip(required_arguments, data_args)) + data_string
split_data = data_string.split("\n")
for i, line in enumerate(split_data):
split_data[i] = line[indent_length:] + "\n"
data_string = ''.join(split_data)
if verbose:
print(">>> Data")
print(with_line_numbers(data_string))
return data_string
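# Illustrative: given a data function such as
#     def data():
#         x = load_dataset()   # load_dataset is hypothetical
#         return x
# the 'def' line and the trailing 'return' are stripped and the body is
# dedented, so temp_model.py executes 'x = load_dataset()' at module level.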
def retrieve_function_string(functions, verbose=True):
function_strings = ''
if functions is None:
return function_strings
for function in functions:
function_string = inspect.getsource(function)
function_strings = function_strings + function_string + '\n'
if verbose:
print(">>> Functions")
print(with_line_numbers(function_strings))
return function_strings
def hyperparameter_names(model_string):
parts = []
params = re.findall(r"(\{\{[^}]+}\})", model_string)
for param in params:
name = re.findall(r"(\w+(?=\s*[\=\(]\s*" + re.escape(param) + r"))", model_string)
if len(name) > 0:
parts.append(name[0])
else:
parts.append(parts[-1])
part_dict = {}
for i, part in enumerate(parts):
if part in part_dict.keys():
part_dict[part] += 1
parts[i] = part + "_" + str(part_dict[part])
else:
part_dict[part] = 0
return parts
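# Illustrative behavior: for a model body containing
#     model.add(Dropout({{uniform(0, 1)}}))
#     model.add(Dense({{choice([256, 512])}}))
# this returns ['Dropout', 'Dense']; a repeated name gets a numeric suffix
# ('Dropout', 'Dropout_1', ...) so that every template maps to a unique key.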
def get_hyperparameters(model_string):
hyperopt_params = re.findall(r"(\{\{[^}]+}\})", model_string)
for i, param in enumerate(hyperopt_params):
hyperopt_params[i] = re.sub(r"[\{\}]", '', param)
return hyperopt_params
def augmented_names(parts):
aug_parts = []
for i, part in enumerate(parts):
aug_parts.append("space['" + part + "']")
return aug_parts
def hyperopt_keras_model(model_string, parts, aug_parts, verbose=True):
colon_index = find_signature_end(model_string)
func_sign_line_end = model_string.count("\n", 0, colon_index) + 1
func_sign_lines = "\n".join(model_string.split("\n")[:func_sign_line_end])
model_string = model_string.replace(func_sign_lines, "def keras_fmin_fnct(space):\n")
result = re.sub(r"(\{\{[^}]+}\})", lambda match: aug_parts.pop(0), model_string, count=len(parts))
if verbose:
print('>>> Resulting replaced keras model:\n')
print(with_line_numbers(result))
return result
| 11,352 | 36.468647 | 136 |
py
|
hyperas
|
hyperas-master/hyperas/utils.py
|
import ast
import re
import warnings
from operator import attrgetter
from hyperopt import space_eval
class ImportParser(ast.NodeVisitor):
def __init__(self):
self.lines = []
self.line_numbers = []
def visit_Import(self, node):
line = 'import {}'.format(self._import_names(node.names))
if (self._import_asnames(node.names) != ''):
line += ' as {}'.format(self._import_asnames(node.names))
self.line_numbers.append(node.lineno)
self.lines.append(line)
def visit_ImportFrom(self, node):
line = 'from {}{} import {}'.format(
node.level * '.',
node.module or '',
self._import_names(node.names))
if (self._import_asnames(node.names) != ''):
line += " as {}".format(self._import_asnames(node.names))
self.line_numbers.append(node.lineno)
self.lines.append(line)
def _import_names(self, names):
return ', '.join(map(attrgetter('name'), names))
def _import_asnames(self, names):
asname = map(attrgetter('asname'), names)
return ''.join(filter(None, asname))
def extract_imports(source, verbose=True):
tree = ast.parse(source)
import_parser = ImportParser()
import_parser.visit(tree)
import_lines = ['#coding=utf-8\n']
for line in import_parser.lines:
if 'print_function' in line:
import_lines.append(line + '\n')
# skip imports for pycharm and eclipse
elif '_pydev_' in line or 'java.lang' in line:
continue
else:
import_lines.append('try:\n {}\nexcept:\n pass\n'.format(line))
imports_str = '\n'.join(import_lines)
if verbose:
print('>>> Imports:')
print(imports_str)
return imports_str
def remove_imports(source):
tree = ast.parse(source)
import_parser = ImportParser()
import_parser.visit(tree)
lines = source.split('\n') # the source including all comments, since we parse the line numbers with comments!
lines_to_remove = set(import_parser.line_numbers)
non_import_lines = [line for i, line in enumerate(lines, start=1) if i not in lines_to_remove]
return '\n'.join(non_import_lines)
def remove_all_comments(source):
string = re.sub(re.compile("'''.*?'''", re.DOTALL), "", source) # remove '''...''' comments
string = re.sub(re.compile("(?<!('|\").)*#[^'\"]*?\n"), "\n", string) # remove #...\n comments
return string
def temp_string(imports, model, data, functions, space):
temp = (imports + "from hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n" +
functions + data + model + "\n" + space)
return temp
def write_temp_files(tmp_str, path='./temp_model.py'):
with open(path, 'w') as f:
f.write(tmp_str)
f.close()
return
def with_line_numbers(code):
"""
Adds line numbers to each line of a source code fragment
Parameters
----------
code : string
        any multiline text, such as (fragments) of source code
Returns
-------
str : string
The input with added <n>: for each line
Example
-------
code = "def do_stuff(x):\n\tprint(x)\n"
with_line_numbers(code)
1: def do_stuff(x):
2: print(x)
3:
"""
max_number_length = str(len(str(len(code))))
format_str = "{:>" + max_number_length + "d}: {:}"
return "\n".join([format_str.format(line_number + 1, line) for line_number, line in enumerate(code.split("\n"))])
def determine_indent(source):
"""
    Figure out the character(s) used for indents in a given source code fragment.
Parameters
----------
    source : string
        source code starting at an indent of 0 and containing at least one indented block.
Returns
-------
string
The character(s) used for indenting.
Example
-------
    code = "def do_stuff(x):\n  print(x)\n"
    indent = determine_indent(code)
print("The code '", code, "' is indented with \n'", indent, "' (size: ", len(indent), ")")
"""
indent = None
reg = r"""
^(?P<previous_indent>\s*)\S.+?:\n # line starting a block, i. e. ' for i in x:\n'
((\s*)\n)* # empty lines
(?P=previous_indent)(?P<indent>\s+)\S # first indented line of the new block, i. e. ' d'(..oStuff())
"""
    matches = re.compile(reg, re.MULTILINE | re.VERBOSE).finditer(source)
for block_start in matches:
new_indent = block_start.groupdict()['indent']
if indent and new_indent != indent:
            warnings.warn('Inconsistent indentation detected. '
'Found "%s" (length: %i) as well as "%s" (length: %i)' % (
indent, len(indent), new_indent, len(new_indent)))
indent = new_indent
return indent
def unpack_hyperopt_vals(vals):
"""
Unpack values from a hyperopt return dictionary where values are wrapped in a list.
:param vals: dict
:return: dict
copy of the dictionary with unpacked values
"""
assert isinstance(vals, dict), "Parameter must be given as dict."
ret = {}
for k, v in list(vals.items()):
try:
ret[k] = v[0]
except (TypeError, IndexError):
ret[k] = v
return ret
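# Illustrative: unpack_hyperopt_vals({'Dropout': [0.42], 'batch_size': [1]})
# returns {'Dropout': 0.42, 'batch_size': 1}; values that are not
# subscriptable are passed through unchanged.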
def eval_hyperopt_space(space, vals):
"""
Evaluate a set of parameter values within the hyperopt space.
Optionally unpacks the values, if they are wrapped in lists.
:param space: dict
the hyperopt space dictionary
:param vals: dict
the values from a hyperopt trial
:return: evaluated space
"""
unpacked_vals = unpack_hyperopt_vals(vals)
return space_eval(space, unpacked_vals)
def find_signature_end(model_string):
"""
Find the index of the colon in the function signature.
:param model_string: string
source code of the model
:return: int
the index of the colon
"""
index, brace_depth = 0, 0
while index < len(model_string):
ch = model_string[index]
if brace_depth == 0 and ch == ':':
break
if ch == '#': # Ignore comments
index += 1
while index < len(model_string) and model_string[index] != '\n':
index += 1
index += 1
elif ch in ['"', "'"]: # Skip strings
string_depth = 0
while index < len(model_string) and model_string[index] == ch:
string_depth += 1
index += 1
if string_depth == 2:
string_depth = 1
index += string_depth
while index < len(model_string):
if model_string[index] == '\\':
index += 2
elif model_string[index] == ch:
string_depth -= 1
if string_depth == 0:
break
index += 1
else:
index += 1
index += 1
elif ch == '(':
brace_depth += 1
index += 1
elif ch == ')':
brace_depth -= 1
index += 1
else:
index += 1
return index
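# Illustrative: in "def model(x, fn=lambda a: a):" the ':' of the lambda sits
# inside the unclosed '(' (brace_depth > 0) and is skipped; the index of the
# signature's final ':' is returned.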
| 7,226 | 30.285714 | 117 |
py
|
hyperas
|
hyperas-master/hyperas/__init__.py
| 0 | 0 | 0 |
py
|
|
hyperas
|
hyperas-master/hyperas/ensemble.py
|
import numpy as np
from keras.models import model_from_yaml
class VotingModel(object):
def __init__(self, model_list, voting='hard',
weights=None, nb_classes=None):
"""(Weighted) majority vote model for a given list of Keras models.
Parameters
----------
model_list: An iterable of Keras models.
        voting: Choose 'hard' for straight-up majority vote of highest model probabilities or 'soft'
for a weighted majority vote. In the latter, a weight vector has to be specified.
weights: Weight vector (numpy array) used for soft majority vote.
nb_classes: Number of classes being predicted.
Returns
-------
A voting model that has a predict method with the same signature of a single keras model.
"""
self.model_list = model_list
self.voting = voting
self.weights = weights
self.nb_classes = nb_classes
if voting not in ['hard', 'soft']:
            raise ValueError('Voting has to be either hard or soft')
if weights is not None:
if len(weights) != len(model_list):
                raise ValueError('Number of models {0} and length of weight vector {1} have to match.'
                                 .format(len(model_list), len(weights)))
def predict(self, X, batch_size=128, verbose=0):
predictions = list(map(lambda model: model.predict(X, batch_size, verbose), self.model_list))
nb_preds = len(X)
if self.voting == 'hard':
for i, pred in enumerate(predictions):
pred = list(map(
lambda probas: np.argmax(probas, axis=-1), pred
))
predictions[i] = np.asarray(pred).reshape(nb_preds, 1)
argmax_list = list(np.concatenate(predictions, axis=1))
            # majority vote: pick the most frequent class label per sample
            votes = np.asarray(list(
                map(lambda arr: max(set(arr), key=list(arr).count), argmax_list)
            ))
if self.voting == 'soft':
for i, pred in enumerate(predictions):
pred = list(map(lambda probas: probas * self.weights[i], pred))
predictions[i] = np.asarray(pred).reshape(nb_preds, self.nb_classes, 1)
weighted_preds = np.concatenate(predictions, axis=2)
weighted_avg = np.mean(weighted_preds, axis=2)
votes = np.argmax(weighted_avg, axis=1)
return votes
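# Minimal usage sketch (model_a/model_b/model_c are hypothetical trained
# Keras classifiers):
#
#     ensemble = VotingModel([model_a, model_b, model_c])      # hard vote
#     y_pred = ensemble.predict(X_test)
#
#     soft = VotingModel([model_a, model_b], voting='soft',
#                        weights=np.array([0.7, 0.3]), nb_classes=10)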
def voting_model_from_yaml(yaml_list, voting='hard', weights=None):
model_list = map(lambda yml: model_from_yaml(yml), yaml_list)
return VotingModel(model_list, voting, weights)
| 2,567 | 39.125 | 101 |
py
|
hyperas
|
hyperas-master/examples/mnist_readme.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from keras.datasets import mnist
from keras.layers.core import Dense, Dropout, Activation
from keras.models import Sequential
from keras.utils import np_utils
from hyperas import optim
from hyperas.distributions import choice, uniform
def data():
"""
Data providing function:
This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
"""
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
nb_classes = 10
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
return x_train, y_train, x_test, y_test
def model(x_train, y_train, x_test, y_test):
"""
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
"""
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([256, 512, 1024])}}))
model.add(Activation({{choice(['relu', 'sigmoid'])}}))
model.add(Dropout({{uniform(0, 1)}}))
# If we choose 'four', add an additional fourth layer
if {{choice(['three', 'four'])}} == 'four':
model.add(Dense(100))
# We can also choose between complete sets of layers
model.add({{choice([Dropout(0.5), Activation('linear')])}})
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', metrics=['accuracy'],
optimizer={{choice(['rmsprop', 'adam', 'sgd'])}})
model.fit(x_train, y_train,
batch_size={{choice([64, 128])}},
epochs=1,
verbose=2,
validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
X_train, Y_train, X_test, Y_test = data()
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
print("Best performing model chosen hyper-parameters:")
print(best_run)
| 3,140 | 34.693182 | 83 |
py
|
hyperas
|
hyperas-master/examples/lstm.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.callbacks import EarlyStopping, ModelCheckpoint
def data():
maxlen = 100
max_features = 20000
print('Loading data...')
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print("Pad sequences (samples x time)")
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
return X_train, X_test, y_train, y_test, max_features, maxlen
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
model = Sequential()
model.add(Embedding(max_features, 128, input_length=maxlen))
model.add(LSTM(128))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
early_stopping = EarlyStopping(monitor='val_loss', patience=4)
checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
verbose=1,
save_best_only=True)
model.fit(X_train, y_train,
batch_size={{choice([32, 64, 128])}},
nb_epoch=1,
validation_split=0.08,
callbacks=[early_stopping, checkpointer])
score, acc = model.evaluate(X_test, y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=10,
trials=Trials())
print(best_run)
| 2,374 | 34.447761 | 80 |
py
|
hyperas
|
hyperas-master/examples/mnist_distributed.py
|
from hyperas import optim
from hyperas.distributions import quniform, uniform
from hyperopt import STATUS_OK, tpe, mongoexp
import keras
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.optimizers import RMSprop
from keras.datasets import mnist
import tempfile
def data():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
num_classes = 10
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test):
"""
Create your model...
"""
layer_1_size = {{quniform(12, 256, 4)}}
l1_dropout = {{uniform(0.001, 0.7)}}
params = {
'l1_size': layer_1_size,
'l1_dropout': l1_dropout
}
num_classes = 10
model = Sequential()
model.add(Dense(int(layer_1_size), activation='relu'))
model.add(Dropout(l1_dropout))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=RMSprop(),
metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test))
score, acc = model.evaluate(x_test, y_test, verbose=0)
out = {
'loss': -acc,
'score': score,
'status': STATUS_OK,
'model_params': params,
}
# optionally store a dump of your model here so you can get it from the database later
temp_name = tempfile.gettempdir()+'/'+next(tempfile._get_candidate_names()) + '.h5'
model.save(temp_name)
with open(temp_name, 'rb') as infile:
model_bytes = infile.read()
out['model_serial'] = model_bytes
return out
if __name__ == "__main__":
trials = mongoexp.MongoTrials('mongo://username:[email protected]:27017/jobs/jobs', exp_key='mnist_test')
best_run, best_model = optim.minimize(model=create_model,
data=data,
algo=tpe.suggest,
max_evals=10,
trials=trials,
keep_temp=True) # this last bit is important
print("Best performing model chosen hyper-parameters:")
print(best_run)
| 2,561 | 35.084507 | 109 |
py
|
hyperas
|
hyperas-master/examples/simple.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
def data():
'''
Data providing function:
This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
'''
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
'''
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
'''
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([256, 512, 1024])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
nb_epoch=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
| 2,737 | 33.225 | 87 |
py
|
hyperas
|
hyperas-master/examples/complex.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.datasets import mnist
from keras.utils import np_utils
def data():
'''
Data providing function:
This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
'''
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
'''
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
'''
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([256, 512, 1024])}}))
model.add(Activation({{choice(['relu', 'sigmoid'])}}))
model.add(Dropout({{uniform(0, 1)}}))
# If we choose 'four', add an additional fourth layer
if {{choice(['three', 'four'])}} == 'four':
model.add(Dense(100))
model.add({{choice([Dropout(0.5), Activation('linear')])}})
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer={{choice(['rmsprop', 'adam', 'sgd'])}},
metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
nb_epoch=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
trials = Trials()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=trials)
for trial in trials:
print(trial)
X_train, Y_train, X_test, Y_test = data()
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
| 3,080 | 35.678571 | 83 |
py
|
hyperas
|
hyperas-master/examples/mnist_ensemble.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, rand
from hyperas import optim
from hyperas.distributions import choice, uniform
from sklearn.metrics import accuracy_score
from keras.utils import np_utils
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
def data():
nb_classes = 10
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, X_test, Y_train, Y_test
def model(X_train, X_test, Y_train, Y_test):
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([400, 512, 600])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
nb_epoch = 10
batch_size = 128
model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
X_train, X_test, Y_train, Y_test = data()
'''
Generate ensemble model from optimization run:
First, run hyperas optimization on specified setup, i.e. 10 trials with TPE,
then return the best 5 models and create a majority voting model from it.
'''
ensemble_model = optim.best_ensemble(nb_ensemble_models=5,
model=model, data=data,
algo=rand.suggest, max_evals=10,
trials=Trials(),
voting='hard')
preds = ensemble_model.predict(X_test)
y_test = np_utils.categorical_probas_to_classes(Y_test)
print(accuracy_score(preds, y_test))
| 2,430 | 33.239437 | 87 |
py
|