Scripts/Modeling/NNModel_prediction.ipynb | ###Markdown
Model Building using Deep Learning Model (Keras)
###Code
# Define model
import random
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential

# Seed Python, NumPy, and TensorFlow for reproducibility
random.seed(42)
np.random.seed(42)
tf.random.set_seed(42)

model = Sequential()
model.add(Dense(100, input_dim=48, activation="relu"))
model.add(Dense(50, activation="relu"))
model.add(Dense(1))
model.summary()  # Print model summary

# Compile model
# model.compile(loss="mean_squared_error", optimizer="Adamax", metrics=["mean_squared_error"])
model.compile(loss="mse", optimizer="adam", metrics=["mse"])

# Fit model
model.fit(X_train, y_train, epochs=3, batch_size=33)
###Output
Epoch 1/3
1451/1451 [==============================] - 5s 3ms/step - loss: 3429727744.0000 - mse: 3430939648.0000
Epoch 2/3
1451/1451 [==============================] - 5s 3ms/step - loss: 3432653824.0000 - mse: 3433865984.0000
Epoch 3/3
1451/1451 [==============================] - 5s 3ms/step - loss: 3431179264.0000 - mse: 3432398080.0000
###Markdown
Fit the NN with in-sample validation
###Code
from sklearn.metrics import mean_squared_error
model.fit(X_train, y_train, epochs=3, validation_data=(X_valid, y_valid))
###Output
Epoch 1/3
1496/1496 [==============================] - 6s 4ms/step - loss: 3431397376.0000 - mse: 3431825920.0000 - val_loss: 297629152.0000 - val_mse: 298047328.0000
Epoch 2/3
1496/1496 [==============================] - 6s 4ms/step - loss: 3432369920.0000 - mse: 3432799744.0000 - val_loss: 298028032.0000 - val_mse: 298443808.0000
Epoch 3/3
1496/1496 [==============================] - 6s 4ms/step - loss: 3430427648.0000 - mse: 3430852608.0000 - val_loss: 315636768.0000 - val_mse: 315985376.0000
###Markdown
Test the NN on the test set and put the results into one dataframe
###Code
#Select the independent variables for test dataset
X_test = test[features].values
#Prediction using Neural Network
y_test_nn = model.predict(X_test)
## pull ee and ncat from the raw test dataframe (df_te)
ee_col_te = df_te['ee']
ncat_te = df_te['ncat']
ee_te = pd.DataFrame(ee_col_te)
ncat_te = pd.DataFrame(ncat_te)
pred_test_df = pd.DataFrame(y_test_nn)
nn_test_df = pd.concat([ncat_te, ee_te, pred_test_df], axis=1)
nn_test_df.to_csv('test31_df.csv',index = False)
###Output
_____no_output_____
###Markdown
Save train predictions to CSV
###Code
## predict in training set
tr = train[features].values
pred_train = model.predict(tr)
## put ee and ncat and pred to csv
ee_col_tr = df_tr['ee']
ncat_tr = df_tr['ncat']
ee_tr = pd.DataFrame(ee_col_tr)
ncat_tr = pd.DataFrame(ncat_tr)
pred_train_df = pd.DataFrame(pred_train)
nn_train_df = pd.concat([ncat_tr, ee_tr, pred_train_df], axis=1)
nn_train_df.to_csv('train31_df.csv',index = False)
###Output
_____no_output_____
###Markdown
Evaluate the result on the validation dataset (Final_valid). Load the validation dataset.
###Code
df_valid = pd.read_csv('Validation_34.csv')
## cap ncat at 300,000
df_valid['ncat'] = np.where(df_valid['ncat'] > 300000, 300000, df_valid['ncat'])
###Output
_____no_output_____
###Markdown
Predict on the validation set
###Code
## predict in validation set
val = df_valid[features].values
pred_val = model.predict(val)
## put ee and ncat and pred to csv
ee_col_val = df_valid['ee']
ncat_val = df_valid['ncat']
ee_val = pd.DataFrame(ee_col_val)
ncat_val = pd.DataFrame(ncat_val)
pred_val_df = pd.DataFrame(pred_val)
nn_valid_df = pd.concat([ncat_val, ee_val, pred_val_df], axis=1)
nn_valid_df.to_csv('valid7_df.csv',index = False)
###Output
_____no_output_____
###Markdown
Predict on the full_train and full_test datasets
###Code
## load full train data
df_full_tr = pd.read_csv('Train_full_34.csv')
## predict in full_train set
full_tr = df_full_tr[features].values
pred_full_tr = model.predict(full_tr)
## put ee and ncat and pred to csv
ee_col_full_tr = df_full_tr['ee']
ncat_full_tr = df_full_tr['ncat']
ee_full_tr = pd.DataFrame(ee_col_full_tr)
ncat_full_tr = pd.DataFrame(ncat_full_tr)
pred_full_tr_df = pd.DataFrame(pred_full_tr)
nn_full_tr_df = pd.concat([ncat_full_tr, ee_full_tr, pred_full_tr_df], axis=1)
nn_full_tr_df.to_csv('full_tr7_df.csv',index = False)
###Output
_____no_output_____
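###Markdown
The predict-and-save steps in this notebook repeat the same pattern for every data split. As a hedged suggestion (not part of the original workflow), a small helper such as the hypothetical `predict_and_save` below could replace each block; it assumes `model`, `features`, and pandas (`pd`) are already defined in earlier cells.
###Code
def predict_and_save(feature_df, raw_df, out_path):
    # Predict with the trained model and save ncat, ee, and the prediction to CSV
    preds = model.predict(feature_df[features].values)
    out = pd.concat(
        [raw_df['ncat'].reset_index(drop=True),
         raw_df['ee'].reset_index(drop=True),
         pd.DataFrame(preds)],
        axis=1)
    out.to_csv(out_path, index=False)
    return out

# Example: predict_and_save(test, df_te, 'test31_df.csv')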
###Markdown
Predict on the full_test dataset
###Code
## load full test dataset
df_full_te = pd.read_csv('Test_full_34.csv')
## predict in full_test set
full_te = df_full_te[features].values
pred_full_te = model.predict(full_te)
## put ee and ncat and pred to csv
ee_col_full_te = df_full_te['ee']
ncat_full_te = df_full_te['ncat']
ee_full_te = pd.DataFrame(ee_col_full_te)
ncat_full_te = pd.DataFrame(ncat_full_te)
pred_full_te_df = pd.DataFrame(pred_full_te)
nn_full_te_df = pd.concat([ncat_full_te, ee_full_te, pred_full_te_df], axis=1)
nn_full_te_df.to_csv('full_te7_df.csv',index = False)
###Output
_____no_output_____ |
ForLoop.ipynb | ###Markdown
###Code
eve=[i for i in range(0,11,2)]
print(eve)
for i in range(1, 5):
for j in range(1,i+1):
print(i, end=' ')
print()
#program for pattern printing using for_loop
for a in range(1,6):
for b in range(1,a+1):
print(b,end=" ")
print("")
for a in range(6,1,-1):
for b in range(1,a):
print(b,end=" ")
print("")
#program for pattern printing using list
list1=[]
for i in range(1,6):
list1.append('#'*i)
print('\n'.join(list1))
list1=[]
for i in range(6,0,-1):
list1.append('#'*i)
print('\n'.join(list1))
num=int(input("enter the number of rows"))
for i in range(0,num):
for k in range(0,num-i- 1):
print(end=" ")
for j in range(0,i+1):
print(j,end=" ")
print()
def pattern(n):
for i in range(0, n):
for j in range(0, i):
print("* ", end="")
print("\r")
for i in range(n, 0 , -1):
for j in range(0, i ):
print("* ", end="")
print("\r")
pattern(10)
###Output
*
* *
* * *
* * * *
* * * * *
* * * * * *
* * * * * * *
* * * * * * * *
* * * * * * * * *
* * * * * * * * * *
* * * * * * * * *
* * * * * * * *
* * * * * * *
* * * * * *
* * * * *
* * * *
* * *
* *
*
|
Assignment_0/.ipynb_checkpoints/test0-checkpoint.ipynb | ###Markdown
Python Basics Edit the function definition of the add function to return the sum of a and b.
###Code
def add(a, b):
"Return the sum of a and b"
"*** YOUR CODE HERE ***"
return 0
###Output
_____no_output_____
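###Markdown
One possible way to complete the add exercise above (a hedged sketch, not the official answer key):
###Code
def add(a, b):
    "Return the sum of a and b"
    return a + b

print(add(2, 3))  # expected: 5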
###Markdown
Fill in the buyLotsOfFruit(orderList) function to take a list of (fruit,pound) tuples and returns the cost of your list. If there is some fruit in the list which doesn't appear in fruitPrices it should print an error message and return None.
###Code
fruitPrices = {'apples':2.00, 'oranges': 1.50, 'pears': 1.75,
'limes':0.75, 'strawberries':1.00}
def buyLotsOfFruit(orderList):
"""
orderList: List of (fruit, numPounds) tuples
Returns cost of order
"""
totalCost = 0.0
"*** YOUR CODE HERE ***"
return totalCost
orderList = [ ('apples', 2.0), ('pears', 3.0), ('limes', 4.0) ]
print ('Cost of', orderList, 'is', buyLotsOfFruit(orderList))
orderList = [ ('avpc', 2.0), ('pears', 3.0), ('limes', 4.0) ]
print ('Cost of', orderList, 'is', buyLotsOfFruit(orderList))
###Output
_____no_output_____
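###Markdown
A hedged sketch of one way to complete buyLotsOfFruit; it uses only the fruitPrices dictionary defined above and returns None (after printing an error) when a fruit is missing from the price list:
###Code
def buyLotsOfFruit(orderList):
    """
    orderList: List of (fruit, numPounds) tuples
    Returns cost of order
    """
    totalCost = 0.0
    for fruit, numPounds in orderList:
        if fruit not in fruitPrices:
            print('Error: fruit %s not in price list' % fruit)
            return None
        totalCost += fruitPrices[fruit] * numPounds
    return totalCost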
###Markdown
Fill in the function shopSmart(orders,shops), which takes an orderList (like the kind passed in to buyLotsOfFruit) and a list of FruitShop and returns the FruitShop where your order costs the least amount in total. Note that we will provide the shop.py implementation as a "support" file, so you don't need to submit yours.
###Code
import shop
def shopSmart(orderList, fruitShops):
"""
orderList: List of (fruit, numPound) tuples
fruitShops: List of FruitShops
"""
return bestShop
if __name__ == '__main__':
"This code runs when you invoke the script from the command line"
orders = [('apples',1.0), ('oranges',3.0)]
dir1 = {'apples': 2.0, 'oranges':1.0}
shop1 = shop.FruitShop('shop1',dir1)
dir2 = {'apples': 1.0, 'oranges': 5.0}
shop2 = shop.FruitShop('shop2',dir2)
shops = [shop1, shop2]
print ("For orders ", orders, ", the best shop is", shopSmart(orders, shops).getName())
orders = [('apples',3.0)]
print ("For orders: ", orders, ", the best shop is", shopSmart(orders, shops).getName())
###Output
_____no_output_____
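###Markdown
A hedged sketch of shopSmart. It assumes the support file's FruitShop class exposes a getPriceOfOrder(orderList) method (as the usual shop.py support file does); if the actual API differs, adapt the price lookup accordingly:
###Code
def shopSmart(orderList, fruitShops):
    """
    orderList: List of (fruit, numPound) tuples
    fruitShops: List of FruitShops
    """
    # Pick the shop with the lowest total price for the order
    bestShop = min(fruitShops, key=lambda s: s.getPriceOfOrder(orderList))
    return bestShop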
###Markdown
Numpy Basics Import the Numpy package
###Code
# *** YOUR CODE HERE ***
# *** YOUR CODE HERE ***
###Output
_____no_output_____
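###Markdown
One way to fill in the cell above (a minimal sketch):
###Code
import numpy as np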
###Markdown
Convert a 1D array to a 2D matrix
###Code
A = np.array([1,2,3,4,5,6])
# *** YOUR CODE HERE ***
# *** YOUR CODE HERE ***
###Output
_____no_output_____
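###Markdown
A possible answer for the cell above (a sketch; the 2x3 target shape is an illustrative choice, any shape compatible with 6 elements works):
###Code
A = np.array([1, 2, 3, 4, 5, 6])
A_2d = A.reshape(2, 3)   # reshape the 6-element vector into a 2x3 matrix
print(A_2d)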
###Markdown
Given a N-D array A, convert it into an 1-D Array
###Code
A = np.array([[1,2], [3,4], [5,6]])
# *** YOUR CODE HERE ***
# *** YOUR CODE HERE ***
###Output
_____no_output_____
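###Markdown
A possible answer for the cell above (a sketch):
###Code
A = np.array([[1, 2], [3, 4], [5, 6]])
A_flat = A.flatten()   # or A.ravel() for a view when possible
print(A_flat)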
###Markdown
Create two Matrices A and B of size 5X6 and 6X5 respectively, and perform the dot product on them.
###Code
A = # initialize matrix A
B = # initialze matrix B
dot_product = # perform the dot product
###Output
_____no_output_____
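###Markdown
A hedged sketch for the cell above; random initialization is an illustrative choice:
###Code
A = np.random.rand(5, 6)        # initialize matrix A (5x6)
B = np.random.rand(6, 5)        # initialize matrix B (6x5)
dot_product = np.dot(A, B)      # resulting shape is 5x5
print(dot_product.shape)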
###Markdown
Find the maximum value present in each row of Matrix A created in the previous question.
###Code
# *** YOUR CODE HERE ***
# *** YOUR CODE HERE ***
###Output
_____no_output_____
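###Markdown
A possible answer (a sketch, reusing the 5x6 matrix A from the sketch above):
###Code
row_max = A.max(axis=1)   # maximum value in each row of A
print(row_max)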
###Markdown
Given a 4X4 matrix pad zeros to it, converting it to a 5X5 matrix
###Code
A = np.ones((4,4))
# *** YOUR CODE HERE ***
# *** YOUR CODE HERE ***
###Output
_____no_output_____
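###Markdown
A possible answer for the cell above (a sketch): padding one extra row and one extra column of zeros turns the 4x4 matrix into a 5x5 one.
###Code
A = np.ones((4, 4))
A_padded = np.pad(A, ((0, 1), (0, 1)), mode='constant', constant_values=0)  # 5x5
print(A_padded)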
###Markdown
Multiply the Matrix from previous question with a scalar 2
###Code
# *** YOUR CODE HERE ***
# *** YOUR CODE HERE ***
###Output
_____no_output_____
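###Markdown
A possible answer (a sketch, using the padded 5x5 matrix A_padded from the sketch above):
###Code
A_scaled = 2 * A_padded   # element-wise multiplication by the scalar 2
print(A_scaled)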
###Markdown
Perform element-wise multiplication Matrices A and B
###Code
A = np.asarray([[2,1,2,1],[1,2,1,2]])
B = np.asarray([[1,2,3,4],[1,2,3,4]])
# *** YOUR CODE HERE ***
# *** YOUR CODE HERE ***
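# A possible answer (a sketch): element-wise (Hadamard) product of A and B
elementwise = A * B          # equivalently, np.multiply(A, B)
print(elementwise)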
###Output
_____no_output_____ |
IBM_AI_Engineering/Course-4-deep-neural-networks-with-pytorch/Week-5-Deep-Networks/8.5.1BachNorm_v2.ipynb | ###Markdown
Batch Normalization with the MNIST Dataset. In this lab, you will build a Neural Network using Batch Normalization and compare it to a Neural Network that does not use Batch Normalization. You will use the MNIST dataset to test your network. Table of Contents: Neural Network Module and Training Function; Load Data; Define Several Neural Networks, Criterion Function, Optimizer; Train Neural Network using Batch Normalization and no Batch Normalization; Analyze Results. Estimated Time Needed: 25 min. Preparation: We'll need the following libraries:
###Code
# These are the libraries will be used for this lab.
# Using the following line code to install the torchvision library
# !conda install -y torchvision
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import torch.nn.functional as F
import matplotlib.pylab as plt
import numpy as np
torch.manual_seed(0)
###Output
_____no_output_____
###Markdown
Neural Network Module and Training Function. Define the neural network module or class. Neural Network Module with two hidden layers using Batch Normalization.
###Code
# Define the Neural Network Model using Batch Normalization
class NetBatchNorm(nn.Module):
# Constructor
def __init__(self, in_size, n_hidden1, n_hidden2, out_size):
super(NetBatchNorm, self).__init__()
self.linear1 = nn.Linear(in_size, n_hidden1)
self.linear2 = nn.Linear(n_hidden1, n_hidden2)
self.linear3 = nn.Linear(n_hidden2, out_size)
self.bn1 = nn.BatchNorm1d(n_hidden1)# input is the number of neurons
self.bn2 = nn.BatchNorm1d(n_hidden2)
# Prediction
def forward(self, x):
x = self.bn1(torch.sigmoid(self.linear1(x)))
x = self.bn2(torch.sigmoid(self.linear2(x)))
x = self.linear3(x)
return x
# Activations, to analyze results
def activation(self, x):
out = []
z1 = self.bn1(self.linear1(x))
out.append(z1.detach().numpy().reshape(-1))
a1 = torch.sigmoid(z1)
out.append(a1.detach().numpy().reshape(-1).reshape(-1))
z2 = self.bn2(self.linear2(a1))
out.append(z2.detach().numpy().reshape(-1))
a2 = torch.sigmoid(z2)
out.append(a2.detach().numpy().reshape(-1))
return out
###Output
_____no_output_____
###Markdown
Neural Network Module with two hidden layers without Batch Normalization
###Code
# Class Net for Neural Network Model
class Net(nn.Module):
# Constructor
def __init__(self, in_size, n_hidden1, n_hidden2, out_size):
super(Net, self).__init__()
self.linear1 = nn.Linear(in_size, n_hidden1)
self.linear2 = nn.Linear(n_hidden1, n_hidden2)
self.linear3 = nn.Linear(n_hidden2, out_size)
# Prediction
def forward(self, x):
x = torch.sigmoid(self.linear1(x))
x = torch.sigmoid(self.linear2(x))
x = self.linear3(x)
return x
# Activations, to analyze results
def activation(self, x):
out = []
z1 = self.linear1(x)
out.append(z1.detach().numpy().reshape(-1))
a1 = torch.sigmoid(z1)
out.append(a1.detach().numpy().reshape(-1).reshape(-1))
z2 = self.linear2(a1)
out.append(z2.detach().numpy().reshape(-1))
a2 = torch.sigmoid(z2)
out.append(a2.detach().numpy().reshape(-1))
return out
###Output
_____no_output_____
###Markdown
Define a function to train the model. In this case the function returns a Python dictionary to store the training loss and accuracy on the validation data
###Code
# Define the function to train model
def train(model, criterion, train_loader, validation_loader, optimizer, epochs=100):
i = 0
useful_stuff = {'training_loss':[], 'validation_accuracy':[]}
for epoch in range(epochs):
for i, (x, y) in enumerate(train_loader):
model.train()
optimizer.zero_grad()
z = model(x.view(-1, 28 * 28))
loss = criterion(z, y)
loss.backward()
optimizer.step()
useful_stuff['training_loss'].append(loss.data.item())
correct = 0
for x, y in validation_loader:
model.eval()
yhat = model(x.view(-1, 28 * 28))
_, label = torch.max(yhat, 1)
correct += (label == y).sum().item()
accuracy = 100 * (correct / len(validation_dataset))
useful_stuff['validation_accuracy'].append(accuracy)
print('epoch: '+str(epoch)+'/'+str(epochs)+" training_loss: "+str(loss.data.item())+' val_acc: '+str(accuracy))
return useful_stuff
###Output
_____no_output_____
###Markdown
Make Some Data. Load the training dataset by setting the parameter train to True and convert it to a tensor by placing a transform object in the argument transform.
###Code
# load the train dataset
train_dataset = dsets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())
###Output
_____no_output_____
###Markdown
Load the validation dataset by setting the parameter train to False and convert it to a tensor by placing a transform object in the argument transform.
###Code
# load the train dataset
validation_dataset = dsets.MNIST(root='./data', train=False, download=True, transform=transforms.ToTensor())
###Output
_____no_output_____
###Markdown
Create the training-data loader and the validation-data loader objects.
###Code
# Create Data Loader for both train and validating
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=2000, shuffle=True)
validation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, batch_size=5000, shuffle=False)
###Output
_____no_output_____
###Markdown
Define Neural Network, Criterion function, Optimizer and Train the Model Create the criterion function
###Code
# Create the criterion function
criterion = nn.CrossEntropyLoss()
###Output
_____no_output_____
###Markdown
Variables for Neural Network Shape: hidden_dim is used for the number of neurons in both hidden layers.
###Code
# Set the parameters
input_dim = 28 * 28
hidden_dim = 100
output_dim = 10
###Output
_____no_output_____
###Markdown
Train Neural Network using Batch Normalization and no Batch Normalization. Train Neural Network using Batch Normalization:
###Code
# Create model, optimizer and train the model
model_norm = NetBatchNorm(input_dim, hidden_dim, hidden_dim, output_dim)
optimizer = torch.optim.Adam(model_norm.parameters(), lr = 0.1)
training_results_Norm=train(model_norm , criterion, train_loader, validation_loader, optimizer, epochs=5)
###Output
epoch: 0/5 training_loss: 0.3168555498123169 val_acc: 89.92
epoch: 1/5 training_loss: 0.26121243834495544 val_acc: 92.23
epoch: 2/5 training_loss: 0.19269311428070068 val_acc: 93.4
epoch: 3/5 training_loss: 0.16179583966732025 val_acc: 94.28999999999999
epoch: 4/5 training_loss: 0.1565321683883667 val_acc: 94.46
###Markdown
Train Neural Network with no Batch Normalization:
###Code
# Create model without Batch Normalization, optimizer and train the model
model = Net(input_dim, hidden_dim, hidden_dim, output_dim)
optimizer = torch.optim.Adam(model.parameters(), lr = 0.1)
training_results = train(model, criterion, train_loader, validation_loader, optimizer, epochs=5)
###Output
epoch: 0/5 training_loss: 2.2894840240478516 val_acc: 10.72
epoch: 1/5 training_loss: 1.9654200077056885 val_acc: 21.5
epoch: 2/5 training_loss: 1.7731878757476807 val_acc: 27.029999999999998
epoch: 3/5 training_loss: 1.688306450843811 val_acc: 24.11
epoch: 4/5 training_loss: 1.6406196355819702 val_acc: 31.290000000000003
###Markdown
Analyze Results Compare the histograms of the activation for the first layer of the first sample, for both models.
###Code
model.eval()
model_norm.eval()
out=model.activation(validation_dataset[0][0].reshape(-1,28*28))
plt.hist(out[2],label='model with no batch normalization' )
out_norm=model_norm.activation(validation_dataset[0][0].reshape(-1,28*28))
plt.hist(out_norm[2],label='model with normalization')
plt.xlabel("activation ")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
We see the activations with Batch Normalization are zero centred and have a smaller variance. Compare the training loss for each iteration
###Code
# Plot the diagram to show the loss
plt.plot(training_results['training_loss'], label='No Batch Normalization')
plt.plot(training_results_Norm['training_loss'], label='Batch Normalization')
plt.ylabel('Cost')
plt.xlabel('iterations ')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Compare the validation accuracy for each iteration
###Code
# Plot the diagram to show the accuracy
plt.plot(training_results['validation_accuracy'],label='No Batch Normalization')
plt.plot(training_results_Norm['validation_accuracy'],label='Batch Normalization')
plt.ylabel('validation accuracy')
plt.xlabel('epochs ')
plt.legend()
plt.show()
###Output
_____no_output_____ |
Code/5_1_GAN.ipynb | ###Markdown
Gumbel Softmax
###Code
def sample_gumbel(shape, eps=GUMBEL_EPS):
unif = torch.rand(*shape).to(device)
g = -torch.log(-torch.log(unif + eps))
return g.to(device)
def sample_gumbel_softmax(logits, temperature):
"""
Input:
logits: Tensor of log probs, shape = BS x k
temperature = scalar
Output: Tensor of values sampled from Gumbel softmax.
These will tend towards a one-hot representation in the limit of temp -> 0
shape = BS x k
"""
g = sample_gumbel(logits.shape)
h = (g + logits)/temperature.to(device)
h_max = h.max(dim=-1, keepdim=True)[0]
h = h - h_max
cache = torch.exp(h)
y = cache / cache.sum(dim=-1, keepdim=True)
return y
###Output
_____no_output_____
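###Markdown
A quick sanity check of the sampler (a hedged illustration, not part of the original notebook; it assumes the `device` variable and the imports from the earlier setup cells are available): with a low temperature the samples are close to one-hot, while a high temperature smooths them toward uniform.
###Code
# Hypothetical usage example for sample_gumbel_softmax
logits = torch.log(torch.tensor([[0.1, 0.2, 0.7]])).to(device)
print(sample_gumbel_softmax(logits, torch.tensor([0.05])))  # nearly one-hot
print(sample_gumbel_softmax(logits, torch.tensor([5.0])))   # much softer distribution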
###Markdown
Generator
###Code
class Generator (nn.Module):
def __init__(self,
input_size: int,
hidden_size: int,
temperature: float,
cat: Counter):
super(Generator, self).__init__()
self.cat = cat
self.cat_n = list(cat.values())
self.output_size = sum(self.cat.values())
self.temperature = torch.Tensor([temperature]).to(device)
self.l1 = nn.Sequential(
nn.Linear(input_size, hidden_size),
nn.LeakyReLU(negative_slope=0.2),
nn.BatchNorm1d(hidden_size, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Dropout(0.3)
)
self.l2 = nn.Sequential(
nn.Linear(hidden_size, hidden_size * 2),
nn.LeakyReLU(negative_slope = 0.2),
nn.BatchNorm1d(hidden_size * 2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Dropout(0.3)
)
self.l3 = nn.Sequential(
nn.Linear(hidden_size * 2, hidden_size * 3),
nn.LeakyReLU(negative_slope = 0.2),
nn.BatchNorm1d(hidden_size * 3, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Dropout(0.3)
)
self.l4 = nn.Sequential(
nn.Linear(hidden_size * 3, hidden_size * 2),
nn.LeakyReLU(negative_slope = 0.2),
nn.BatchNorm1d(hidden_size * 2, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
nn.Dropout(0.3)
)
self.out = nn.Sequential(
nn.Linear(hidden_size * 2, self.output_size))
def forward(self,x):
x=self.l1(x)
x=self.l2(x)
x=self.l3(x)
x=self.l4(x)
x=self.out(x)
### Softmax per class
x = (x.split(self.cat_n, dim=1))
out = torch.cat([sample_gumbel_softmax(v, temperature = self.temperature) for v in x], dim=1)
return out
###Output
_____no_output_____
###Markdown
Discriminator
###Code
class Discriminator(nn.Module):
def __init__(self, input_size:int, output_size=1):
'''
input_size: size of the data
output_size: is always 1
vanila: if True, Sigmoid is going to applied on the last layer
'''
super(Discriminator,self).__init__()
self.l1 = nn.Sequential(
nn.Linear(input_size, 1024),
nn.LeakyReLU(0.2),
nn.Dropout(0.3)
)
self.l2 = nn.Sequential(
nn.Linear(1024, 512),
nn.LeakyReLU(0.2),
nn.Dropout(0.3)
)
self.l3 = nn.Sequential(
nn.Linear(512, 256),
nn.LeakyReLU(0.2),
nn.Dropout(0.3)
)
self.out = nn.Sequential(
torch.nn.Linear(256, output_size)
)
def forward(self, x):
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.out(x)
return x
def clip(self, thr):
for p in self.parameters():
p.data.clamp_(-thr, thr)
class GP_WGAN():
def __init__(self,
data,
cat,
epochs = 5000,
batch_size=64,
gen_learn_rate=1E-5, # 4
disc_learn_rate=1E-5, # 5
gamma = 10,
temperature = 1E-3,
gen_hidden_size = 512,
pinalize = True):
#Data
self.cat = cat
self.cat_n = list(cat.values())
self.onehot_size = sum(self.cat.values())
self.train_val_split(data, batch_size)
#Networks
self.G = Generator(input_size=INPUT_SIZE, hidden_size=gen_hidden_size,
temperature=temperature, cat=self.cat).to(device)
self.D = Discriminator(input_size=TARGET_NUM).to(device)
#Parameters
self.epochs = epochs
self.batch_size = batch_size
self.gen_learn_rate = gen_learn_rate
self.gen_hidden_size = gen_hidden_size
self.disc_learn_rate = disc_learn_rate
self.gamma = gamma
self.temperature = temperature
self.pinalize = pinalize
''''
ADAM optimizer does not give good results
self.generator_optim = torch.optim.Adam(self.G.parameters(), gen_learn_rate, betas=(0.5, 0.999))
self.discriminator_optim = torch.optim.Adam(self.D.parameters(), disc_learn_rate, betas=(0.5, 0.999))
'''
self.generator_optim = torch.optim.RMSprop(self.G.parameters(),
lr = self.gen_learn_rate,
centered=True)
self.discriminator_optim = torch.optim.RMSprop(self.D.parameters(),
lr = self.disc_learn_rate,
centered=True)
def train_val_split(self, data, batch_size):
train, val = train_test_split(data, test_size=0.3)
self.train = DataLoader(torch.tensor(train.values),
batch_size=batch_size, shuffle=True, num_workers=4)
self.val = DataLoader(torch.tensor(val.values),
batch_size=batch_size, shuffle=True, num_workers=4)
def sample(self, n_samples: int):
'''
Generate the data data with Generator network
n_samples: usually equals to the batch size
'''
z = gen_noise(INPUT_SIZE, n_samples)
z = Variable(z, requires_grad=False).to(device)
return self.G.forward(z)
def reset_gradient(self):
self.D.zero_grad()
self.G.zero_grad()
def grad_penalty(self, data, generated_data):
batch_size = data.size(0)
epsilon = torch.rand(batch_size, TARGET_NUM)
epsilon = epsilon.expand_as(data)
epsilon = epsilon.to(device)
interpolation = epsilon * data + (1 - epsilon) * generated_data
interpolation = Variable(interpolation, requires_grad=True)
interpolation = interpolation.to(device)
interpolation_logits = self.D(interpolation)
grad_outputs = torch.ones(interpolation_logits.size()).to(device)
gradients = torch.autograd.grad(outputs=interpolation_logits,
inputs=interpolation,
grad_outputs=grad_outputs,
create_graph=True,
retain_graph=True)[0]
gradients = gradients.view(batch_size, -1)
gradients_norm = torch.sqrt(torch.sum(gradients ** 2, dim=1) + 1e-12)
return self.gamma * ((gradients_norm - 1) ** 2).mean()
def fit(self, n_critic=1, n_gen=3, to_log=True):
filename = 'Logs/wgan-{date:%Y-%m-%d_%H:%M:%S}'.format( date=datetime.datetime.now() )
filename = filename.replace(':', '')
self.log_setting(filename, n_critic, n_gen)
self.discriminator_loss, self.generator_loss = [], []
for epoch in range(self.epochs):
gen_gradient = 0
batch_d_loss, batch_g_loss = [], []
batch_gp , batch_rs , batch_fs = [], [], []
for x in self.val:
## Reset gradient for both networks (on new epoch)
self.reset_gradient()
a = list(self.G.parameters())[0].clone()
a1 = list(self.G.parameters())[1].clone()
a2 = list(self.G.parameters())[2].clone()
a3 = list(self.G.parameters())[3].clone()
a4 = list(self.G.parameters())[4].clone()
x = Variable(x).float().to(device)
## Determine the batch size
batch_size = x.shape[0]
#STEP 1. TRAIN THE GENERATOR (if n_gen is larger than 1)
if (n_gen-1)>0:
for _ in range(n_gen-1):
x_fake = self.sample(batch_size).to(device)
output = self.D.forward(x_fake)
G_loss = -torch.mean(output)
G_loss.backward()
self.generator_optim.step()
self.reset_gradient()
batch_g_loss.append(G_loss.item())
#for param in self.G.parameters():
# print(param.grad.data.sum())
# start debugger
#import pdb; pdb.set_trace()
# STEP 2. TRAIN THE DISCRIMINATOR (With gradient penalty)
if n_critic <= 0: n_critic=1
for _ in range(n_critic):
output_true = self.D.forward(x)
# Step 2.1 Generate fake data G(z), where z ~ N(0, 1)
# is a latent code.
x_fake = self.sample(batch_size).to(device)
# Step 3. Send fake data through discriminator
# propagate error and update D weights.
# --------------------------------------------
# Note: detach() is used to avoid compounding generator gradients
output_fake = self.D.forward(x_fake.detach())
if self.pinalize:
gp = self.grad_penalty(x, x_fake)
else:
gp = torch.tensor(0)
D_loss = -(torch.mean(output_true) - torch.mean(output_fake)) + gp
D_loss.backward()
self.discriminator_optim.step()
if not self.pinalize:
self.D.clip(0.1)
#Reset the gradient
self.reset_gradient()
batch_d_loss.append(D_loss.item())
batch_gp.append(gp.item())
batch_rs.append(torch.mean(output_true).item())
batch_fs.append(torch.mean(output_fake).item())
# Step 4. Send fake data through discriminator _again_
# propagate the error of the generator and
# update G weights.
#x_fake = self.sample(batch_size).to(device)
#x_fake = (x_fake.split(self.cat_n, dim=1))
#x_fake = torch.cat([sample_gumbel_softmax(v, self.temperature) for v in x_fake], dim=1)
output = self.D.forward(x_fake)
G_loss = -torch.mean(output)
G_loss.backward()
try:
for param in self.G.parameters():
gen_gradient += param.grad.data.sum()
except:
print('Unstable generator')
self.generator_optim.step()
b = list(self.G.parameters())[0].clone()
b1 = list(self.G.parameters())[1].clone()
b2 = list(self.G.parameters())[2].clone()
b3 = list(self.G.parameters())[3].clone()
b4 = list(self.G.parameters())[4].clone()
batch_fs.append(torch.mean(output_fake).item())
batch_g_loss.append(G_loss.item())
self.discriminator_loss.append(np.mean(batch_d_loss))
self.generator_loss.append(np.mean(batch_g_loss))
clear_output()
print("Generator gradient: %.7f" %gen_gradient, 'Weight Update %s %s %s %s %s' % (torch.equal(a.data, b.data),
torch.equal(a1.data, b1.data),
torch.equal(a2.data, b2.data),
torch.equal(a3.data, b3.data),
torch.equal(a4.data, b4.data)
))
#### Output per epoch
print("Epoch: %3d || D Loss: %5.5f (rs:%3.3f fs:%3.3f gp:%3.3f) || G Loss: %5.5f " %(epoch,
np.mean(batch_d_loss),
np.mean(batch_rs),
np.mean(batch_fs),
np.mean(batch_gp),
np.mean(batch_g_loss)))
# -- Plotting --
f, axarr = plt.subplots(1, 2, figsize=(18, 7))
# Loss
axarr[0].set_xlabel('Epoch')
axarr[0].set_ylabel('Loss')
axarr[0].set_title('Discriminator Loss || lr= %s' %self.disc_learn_rate )
axarr[1].set_xlabel('Epoch')
axarr[1].set_ylabel('Loss')
axarr[1].set_title('Generator Loss || lr= %s' %self.gen_learn_rate )
axarr[0].plot(np.arange(epoch+1), self.discriminator_loss)
axarr[1].plot(np.arange(epoch+1), self.generator_loss, linestyle="--")
plt.show()
if to_log:
self.log(filename, epoch, np.mean(batch_d_loss), np.mean(batch_g_loss), np.mean(batch_rs),
np.mean(batch_fs), np.mean(batch_gp))
print(x_fake[0])
print(x[0])
def synthesise(self, num=2):
data_dummy = pd.DataFrame(columns=data.columns, dtype=np.int32)
x_fake = self.sample(num)
x_fake = x_fake.split(self.cat_n, dim=1)
x_fake = torch.cat([softmax2onehot(v) for v in x_fake], dim=1)
x_fake = np.array(x_fake.cpu())[0].astype(int)
data_dummy.loc[-1] =np.array(x_fake)[0].astype(int)
return back_from_dummies(data_dummy)
def log(self, name, epoch, d_loss, g_loss, rs, fs, gp):
fields=[epoch, d_loss, g_loss, rs, fs, gp]
with open(r''+name + '.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow(fields)
if epoch % 50 == 0:
torch.save(self.G, name)
def log_setting(self, name, n_critic, n_gen):
with open(r''+name+ '.txt', 'w') as f:
f.write('BATCH NUM: %s \n' %self.batch_size)
f.write('Latent Space %s \n' %INPUT_SIZE)
f.write('Target Num %s \n'%TARGET_NUM)
f.write('D_LR %s \n' %self.disc_learn_rate)
f.write('G_LR %s \n'%self.gen_learn_rate)
f.write('GP Gamma %s \n' %self.gamma)
f.write( 'Softmax T %s \n' %self.temperature)
f.write( 'G_hidden_size %s \n' %self.gen_hidden_size,)
f.write( 'G NUM %s \n' %n_gen)
f.write( 'C/D NUM %s \n' %n_critic)
f.close()
gan = GP_WGAN(data = data, cat = cat, pinalize=False)
gan.fit(n_critic=5, n_gen=1) ##add num of critics
## batch size change (was the most influential part) + lr increase for generator ## remove category with too many labels
### 4 6 - got a bit down after epoch 7000
### clip the result
## batch normalization really helped - discriminator is tricked by gen quite fast
### low temperature did not allow the gradient to flow through
###Output
_____no_output_____ |
Drug_portfolio.ipynb | ###Markdown
Drug Portfolio Selection. Team: Adetoun, Chip, Lily, Matthias, Youssef. Due: 2021-12-02. Setup
###Code
import gurobipy as gp
from gurobipy import GRB
from math import sqrt
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Data and Assumptions. *Author's note: we transformed the drug project data into a tidy format for ease of use. Some steps from the assignment template have been revised accordingly.* Drug project data:
###Code
# Import drug data and transform into a tidy data frame
data = pd.read_csv('drugs.csv', index_col=0, header=None).transpose()
data = data.set_axis(['project', 'ta','time_to_market','revenue','cost'], axis=1, inplace=False)
data = data.astype({'project': int, 'ta': str, 'time_to_market': int, 'revenue': float, 'cost': float})
projects = data['project']
t_area = data['ta'] # therapeutic area
ttm = data['time_to_market'] # in years (whole numbers)
rev = data['revenue'] # in millions
cost = data['cost'] # in millions
#import covariance matrix
cov=pd.read_csv('drugs_cov.csv', index_col=0)
# creates lower triangular matrix needed in Value at Risk analysis in Q4 using Cholesky factorization
# Note that this results in L to be in matrix format (i.e., not in the dataframe format anymore)
L = np.linalg.cholesky(cov)
###Output
_____no_output_____
###Markdown
Therapeutic areas, budgets, and risk-free rate of return:
###Code
#therapeutic areas
ther=[
"Oncology", "Cardiovascular", "Respiratory and dermatology", "Transplantation",
"Rheumatology and hormone therapy", "Central nervous system", "Ophtalmics"]
#budget constraints for therapeutic areas
t_bud={"Oncology": 100,
"Cardiovascular": 200,
"Respiratory and dermatology": 150,
"Transplantation": 100,
"Rheumatology and hormone therapy": 300,
"Central nervous system": 100,
"Ophtalmics": 50}
interest_rate=0.03
base_budget=1000
additional_budget=50
###Output
_____no_output_____
###Markdown
Model 2A - Without $50MM Extra Initialize empty model:
###Code
m = gp.Model('portfolio')
###Output
Academic license - for non-commercial use only - expires 2022-09-25
Using license file /Users/youssefragab/gurobi.lic
###Markdown
Decision variable (whether or not to invest in each project):
###Code
x = pd.Series(m.addVars(projects, vtype = GRB.BINARY), index=projects)
#define covariance for each project
portfolio_risk = np.dot(np.dot(np.transpose(x),cov), x)
###Output
_____no_output_____
###Markdown
Constraints:
###Code
# $1 billion total budget
m.addConstr(sum(x[i] * cost[i] for i in projects) <= base_budget)
# TA-level budgets
for t in ther:
m.addConstr(sum(x[i] * cost[i] for i in projects if t_area[i] == t) <= t_bud[t])
# Pipeline balance
m.addConstr(sum(x[i] for i in projects if ttm[i] == 1) >= 0.15 * sum(x[i] for i in projects))
m.addConstr(sum(x[i] for i in projects if ttm[i] in (2,3)) >= 0.20 * sum(x[i] for i in projects))
m.addConstr(sum(x[i] for i in projects if ttm[i] in (4,5)) >= 0.25 * sum(x[i] for i in projects))
#risk constraint which minimizes the variance of the project returns
#comment constraint out and rerun model at different variance levels to capture return
m.addConstr(portfolio_risk<=1.59E+04)
# For validation
m.write("model.lp")
###Output
Warning: Q constraint 0 doesn't have a name
###Markdown
Objective function (maximize revenue plus return on uninvested funds minus total budget):
###Code
tot_rev = sum(x[i] * rev[i] for i in projects)
tot_cost = sum(x[i] * cost[i] for i in projects)
uninvested_return = interest_rate * (base_budget - tot_cost)
m.setObjective(tot_rev + uninvested_return - base_budget, GRB.MAXIMIZE)
# For validation
m.write("model.lp")
###Output
Warning: Q constraint 0 doesn't have a name
###Markdown
Model 2A Results
###Code
# Optimize model to find max rev
m.optimize()
print('Variance = %g' % portfolio_risk.getValue())
# Create an expression representing the expected return for the portfolio
portfolio_return = tot_rev
# total return = revenue plus return on uninvested funds minus budget
print("total return: ", m.objVal)
for v in m.getVars():
if v.x == 1:
print("Invested in project", v.varName)
# this loop feels a really dumb way to calculate this, feel free to improve
spent = 0
n = 0
for i in projects:
if m.getVars()[i - 1].x == 1:
spent += cost[i]
n += 1
print("Invested", spent, "million dollars into", n, "projects.")
###Output
Invested 65.63 million dollars into 6 projects.
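###Markdown
A hedged sketch (not in the original notebook) of how the efficient-frontier points referenced in the next cell can be generated: comment out the fixed risk constraint above, then re-solve the model over a grid of risk bounds and record the optimal return at each one. The grid values below are illustrative.
###Code
frontier = []
for rhs in [1e4, 2e4, 5e4, 1e5, 1e6, 1e7, 1.8e7]:
    risk_con = m.addConstr(portfolio_risk <= rhs)   # temporary risk bound
    m.optimize()
    frontier.append((rhs, m.objVal))
    m.remove(risk_con)                              # drop it before the next iteration
print(frontier)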
###Markdown
Model 2A Efficient Frontier Illustration: click to view the [Efficient Frontier illustration](https://drive.google.com/file/d/1JiSkXC6R83v1NpH0Z7l0lOs70cRGVP9N/view?usp=sharing). Model 2B - With $50MM Extra: run the model with different RHS values for the right-hand side of the portfolio risk constraint:
###Code
portfolio_risk_rhs = 1.80E+07
# Initialize similar model
m = gp.Model('portfolio')
# With similar decision variables
x = m.addVars(projects, vtype = GRB.BINARY, name = 'x')
###Output
_____no_output_____
###Markdown
Additional budget helpers:
###Code
# Decision variable for assigning additional budget to TAs
b = m.addVars(ther, vtype = GRB.BINARY, name = 'b')
# Dictionary of additional funds per TA
t_bud_extra = t_bud.copy()
# Assign zero or additional budget to the copy dictionary
for t in ther:
t_bud_extra[t] = additional_budget * b[t]
# Allow up to 1 additional budget assignment
m.addConstr(sum(b[t] for t in ther) <= 1)
# Note: the TA-level budget constraint in the next cell has also been updated
###Output
_____no_output_____
###Markdown
Similar constraints and objective function as before (except for the additional budget):
###Code
# $1 billion total budget
m.addConstr(sum(x[i] * cost[i] for i in projects) <= base_budget + additional_budget)
# TA-level budgets (base plus extra $50MM)
for t in ther:
m.addConstr(sum(x[i] * cost[i] for i in projects if t_area[i] == t) <= t_bud[t] + t_bud_extra[t])
# Pipeline balance
m.addConstr(sum(x[i] for i in projects if ttm[i] == 1) >= 0.15 * sum(x[i] for i in projects))
m.addConstr(sum(x[i] for i in projects if ttm[i] in (2,3)) >= 0.20 * sum(x[i] for i in projects))
m.addConstr(sum(x[i] for i in projects if ttm[i] in (4,5)) >= 0.25 * sum(x[i] for i in projects))
# Objective function
tot_rev = sum(x[i] * rev[i] for i in projects)
tot_cost = sum(x[i] * cost[i] for i in projects)
uninvested_return = interest_rate * (base_budget - tot_cost)
m.setObjective(tot_rev + uninvested_return - base_budget, GRB.MAXIMIZE)
# For validation
m.write("model.lp")
###Output
Warning: variable name "b[Respiratory and dermatology]" has a space
Warning: to let Gurobi read it back, use rlp format
###Markdown
Add a portfolio risk constraint. Use a subset of the constraints used to make the efficient frontier.
###Code
#define covariance for each project
x_for_risk = pd.Series(x, index=projects)
portfolio_risk = np.dot(np.dot(np.transpose(x_for_risk),cov), x_for_risk)
m.addConstr(portfolio_risk <= portfolio_risk_rhs)
###Output
_____no_output_____
###Markdown
Model 2B Results
###Code
m.setParam('OutputFlag', 0) # run silently
m.optimize()
print("Total return: ", m.objVal)
print('Variance = %g' % portfolio_risk.getValue())
for v in m.getVars():
if v.x == 1:
print("Invested in project", v.varName)
###Output
Invested in project x[3]
Invested in project x[4]
Invested in project x[5]
Invested in project x[6]
Invested in project x[17]
Invested in project x[20]
Invested in project x[21]
Invested in project x[22]
Invested in project x[24]
Invested in project x[25]
Invested in project x[26]
Invested in project x[27]
Invested in project x[28]
Invested in project x[29]
Invested in project x[30]
Invested in project x[39]
Invested in project x[40]
Invested in project x[42]
Invested in project x[44]
Invested in project x[45]
Invested in project x[46]
Invested in project x[47]
Invested in project x[48]
Invested in project x[50]
Invested in project x[53]
Invested in project x[57]
Invested in project x[58]
Invested in project x[61]
Invested in project x[62]
Invested in project x[66]
Invested in project x[69]
Invested in project x[72]
Invested in project x[76]
Invested in project x[77]
Invested in project x[78]
Invested in project x[86]
Invested in project x[87]
Invested in project x[91]
Invested in project x[94]
Invested in project x[97]
Invested in project x[98]
Invested in project x[99]
Invested in project x[100]
Invested in project x[101]
Invested in project x[102]
Invested in project x[103]
Invested in project x[104]
Invested in project x[105]
Invested in project x[106]
Invested in project x[109]
Invested in project x[110]
Invested in project x[111]
Invested in project x[112]
Invested in project b[Respiratory and dermatology]
|
Data Exploration Analysis and Visualization/H1B-Data-Analysis-master/H1B_Dash_Dashboard.ipynb | ###Markdown
Connecting to the Database
###Code
conn = sqlite3.connect("/Users/ankitkothari/Documents/COMPLETED_PROJECTS/H1B_data_analysis/us_h1b.db")
###Output
_____no_output_____
###Markdown
Filtering Criteria
###Code
filter_query = '''
select
h1b.Employer,
h1b2.Denials,
h1b2.Approvals,
h1b2.Fiscal_Year
from h1b left join
(
select
Employer,
SUM(Initial_Denials) + SUM(Continuing_Denials) Denials,
count(DISTINCT Fiscal_Year) Fiscal_Year,
SUM(h1b.Initial_Approvals)+ SUM(h1b.Continuing_Approvals) Approvals
from h1b
where h1b.Fiscal_Year !='2019'
group by 1
) h1b2 on h1b.Employer = h1b2.Employer
group by 1
having h1b2.Fiscal_Year>9 and h1b2.Denials>2 and h1b2.Approvals >50
;'''
pandas_filter_query = pd.read_sql_query(filter_query, conn)
pandas_filter_query.to_csv("/Users/ankitkothari/Documents/dash-app/pandas_filter_query1.csv")
pandas_filter_query['Denials']=pandas_filter_query['Denials'].astype(int)
print(pandas_filter_query.head())
###Output
Employer Denials Approvals Fiscal_Year
0 3A SOFT INC 3 82 10
1 3CORE SYSTEMS INC 22 163 10
2 3I INFOTECH INC 144 1486 10
3 3K TECHNOLOGIES LLC 13 215 10
4 3M COMPANY 5 240 10
###Markdown
Initializing the DASH APP
###Code
app = dash.Dash()
app.css.append_css({'external_url': 'https://cdn.rawgit.com/plotly/dash-app-stylesheets/2d266c578d2a6e8850ebce48fdb52759b2aef506/stylesheet-oil-and-gas.css'})
###Output
_____no_output_____
###Markdown
Drop-Down Menu to Select Employer
###Code
employer_class = [{'label': str(item),
'value': str(item)}
for item in pandas_filter_query['Employer'].unique()]
employer_class[0:4]
###Output
_____no_output_____
###Markdown
App Layout
###Code
app.layout = html.Div([
html.Div(
[
html.H1(
'H1B VISA TRENDS',
style={'font-family': 'Helvetica',
"margin-top": "0",
"margin-bottom": "0",
"color":"black",
"width": "100%"},
className='eight columns',
),
], className='row', style={'display': 'inline-block'}
),
html.Div(
[
html.Div([
#dcc.Input(id='my-id', value='Choose your Employer:', type='text'),
html.P('Please select the Employer:'),
dcc.Dropdown(
id='employer',
options= employer_class,
multi=False,
value=None
)
],
className='eight columns',
style={'margin-top': '10', 'margin-right': "0"}
),
], className='row',style={'width': '120%', 'display': 'inline-block'} ),
html.Div(
[
dcc.Graph(
id='bar-graph-2',
style={"margin-right": "0"},
className='five columns',
),
dcc.Graph(
id='bar-graph',
style={"margin-left": "10"},
className='five columns',
),
html.H2('Data'),
html.Div([
html.P('1. The Data has been taken from the USCIS website.'),
html.P('2. The Data has been cleaned and analyzed, so there may be inaccuracies'),
html.P('3. This should not be treated as a source of truth'),
html.P('4. New Approvals and Continuing Approvals are combined together.'),
html.P('5. Employers who have used H1B program for atleast 8 fiscal years'),
html.P(' are only counted.'),
])
], className='row',style={'width': '100%', 'display': 'inline-block'}),
html.Div(
[
dcc.Graph(
id='bar-graph-3',
style={"margin-right": "0"},
className='five columns',
),
dcc.Graph(
id='map-graph',
style={"margin-right": "0"},
className='five columns',
),
html.H2('Connect'),
dcc.Markdown('''
[**LinkedIn**](https://www.linkedin.com/in/ankit-kothari-510a9623/)
[**Code**](https://github.com/ankit-kothari/Data-Science-Journey/tree/master/Data%20Exploration%20Analysis%20and%20Visualization/H1B-Data-Analysis-master).
'''),
html.Div([
html.P('Please connect with me if you have any questions or if you like this')])
], className='row', style={'width': '100%', 'display': 'inline-block'}),
])
###Output
_____no_output_____
###Markdown
Querying Approvals and Denials By Fiscal Year for each Employer
###Code
h1b_query20 = '''
with employer_filter as
(
select
h1b.Employer, count(DISTINCT h1b.Fiscal_Year) Fiscal_Year,
h1b2.Denials
from h1b left join
(
select
Employer,
SUM(Initial_Denials) + SUM(Continuing_Denials) Denials,
SUM(h1b.Initial_Approvals)+ SUM(h1b.Continuing_Approvals) Approvals
from h1b
group by 1
) h1b2 on h1b.Employer = h1b2.Employer
where h1b.Fiscal_Year !='2019'
group by 1
having count(DISTINCT h1b.Fiscal_Year)>9 and h1b2.Denials>2 and h1b2.Approvals >50
)
select
h1b.Fiscal_Year,h1b.Employer,
SUM(h1b.Initial_Approvals)+ SUM(h1b.Continuing_Approvals) Approvals,
SUM(h1b.Initial_Denials)+SUM(h1b.Continuing_Denials) AS Denials
from employer_filter ef left join h1b on h1b.Employer=ef.Employer
where h1b.Fiscal_Year !='2019'
group by h1b.Fiscal_Year, h1b.Employer
'''
pandas_fiscal_year = pd.read_sql_query(h1b_query20, conn)
pandas_fiscal_year.to_csv("/Users/ankitkothari/Documents/dash-app/pandas_fiscal_year1.csv")
print(pandas_fiscal_year.head())
###Output
Fiscal_Year Employer Approvals Denials
0 2009 3A SOFT INC 2 0.0
1 2009 3CORE SYSTEMS INC 7 3.0
2 2009 3I INFOTECH INC 20 0.0
3 2009 3K TECHNOLOGIES LLC 16 1.0
4 2009 3M COMPANY 13 1.0
###Markdown
Plotting Approvals and Denials by Fiscal Year for the Selected Employer
###Code
@app.callback(
dash.dependencies.Output('bar-graph', 'figure'),
[dash.dependencies.Input('employer', 'value')]
)
def fiscal_plot(employer=None):
try:
employer=employer.upper()
except:
employer=None
if employer is not None:
df21=pandas_fiscal_year[pandas_fiscal_year['Employer']==employer]
df21=df21.groupby('Fiscal_Year').sum()
df21=df21.reset_index()
print(df21.head())
else:
employer='APPLE'
df21=pandas_fiscal_year[pandas_fiscal_year['Employer']==employer]
df21=df21.groupby('Fiscal_Year').sum()
df21=df21.reset_index()
print(df21.head())
print(df21)
fig = go.Figure()
fig.add_trace(go.Bar(x=[x for x in df21.Fiscal_Year] , y=df21.Approvals,marker_color='#2677bb', name='Count of Approvals'))
fig.add_trace(go.Scatter(x=[x for x in df21.Fiscal_Year], y=df21.Denials, mode='lines', name='Count of Denials', yaxis="y2", line=dict(color='#bfbabe', width=4)))
fig.update_layout(
plot_bgcolor='rgba(0,0,0,0)'
)
fig.update_xaxes(
dtick=1,showgrid=False
)
fig.update_yaxes(
showgrid=False
)
fig.update_layout(title="Approvals and Denials by Fiscal year for {}".format(employer),
legend=dict(x=.03,y=0.98, traceorder='reversed', font_size=12),
width=800,
height=400,
uniformtext_minsize=8,
uniformtext_mode='hide',
yaxis=dict(
title="Count of Approvals (Bar)",
titlefont=dict(
color="#151515"
),
anchor="x",
tickfont=dict(
color="#151515"
)
),
yaxis2=dict(
title="Count of Denials (line)",
titlefont=dict(
color="#151515"
),
tickfont=dict(
color="#151515"
),
anchor="x",
side="right",
zeroline=False,
overlaying="y",
position=1
),)
fig.update_layout(
plot_bgcolor='#e0e5db'
)
fig.add_shape(
# Rectangle reference to the axes
type="rect",
xref="x",
yref="paper",
x0='2016',
y0=-0.01,
x1='2018',
y1=1.1,
line=dict(
color="#007500",
width=5,
),
)
return fig
###Output
_____no_output_____
###Markdown
Query: how you compare to the national average
###Code
h1b_query26 ='''
with national as
(
select
SUM(Initial_Denials) + SUM(Continuing_Denials) AS Denials,
SUM(Initial_Approvals) + SUM(Continuing_Approvals) + SUM(Initial_Denials) + SUM(Continuing_Denials) AS Totals
from h1b
where Fiscal_Year !='2019'
),
employer as
(
select
Employer, SUM(Initial_Denials) + SUM(Continuing_Denials) AS Denials,
SUM(Initial_Approvals) + SUM(Continuing_Approvals) + SUM(Initial_Denials) + SUM(Continuing_Denials) AS Totals
from h1b
group by Employer
order by 3 desc
)
select
employer.Employer,
CAST(national.Denials AS REAL)/ CAST(national.Totals AS REAL) AS national_average,
CAST(employer.Denials AS REAL)/ CAST(employer.Totals AS REAL) AS employer_average
from national, employer
;'''
pandas_health_query = pd.read_sql_query(h1b_query26, conn)
pandas_health_query.to_csv("/Users/ankitkothari/Documents/dash-app/pandas_health_query1.csv")
pandas_health_query.head()
###Output
_____no_output_____
###Markdown
Plotting how you compare to the national average
###Code
@app.callback(
dash.dependencies.Output('bar-graph-2', 'figure'),
[dash.dependencies.Input('employer', 'value')]
)
def health(employer=None):
try:
employer=employer.upper()
except:
employer=None
if employer is not None:
df35a = pandas_health_query[pandas_health_query['Employer']==employer]
else:
employer='APPLE'
df35a = pandas_health_query[pandas_health_query['Employer']==employer]
print(df35a)
df35a = pd.melt(df35a, id_vars=['Employer'], value_vars=['national_average','employer_average'])
df35a['value']=df35a['value'].apply(lambda x: round(x,2)*100)
colors = ['#2677bb',] * 2
colors[1] = '#007500'
fig = go.Figure(data=[go.Bar(
y=['National <br> (USA)', '{}'.format(employer)],
x=[x for x in df35a['value']],
width=.51,
orientation='h',
marker_color=colors, # marker color can be a single color value or an iterable
text=[int(x) for x in df35a['value']],
textposition='outside'# marker color can be a single color value or an iterable
)])
fig.update_layout(
plot_bgcolor='rgba(0,0,0,0)'
)
fig.update_yaxes(
tickangle = 360,
tickfont=dict(family='Rockwell', color='#151515', size=14))
fig.update_traces(marker_line_width=.5, opacity=0.9)
fig.update_layout(title="How you compare with National Denial Rate",
legend=dict(x=.73,y=0.98, traceorder='reversed', font_size=12),
width=800,
height=400,
uniformtext_minsize=12,
xaxis=dict(
title="H1B Visa Denial Rate %",
titlefont=dict(
color="#151515"
),
tickfont=dict(
color="#151515"
)
),
)
return fig
###Output
_____no_output_____
###Markdown
Query: how you compare pre- and post-2016 with other Employers
###Code
h1b_query21a= '''
with h1b_table_by_state AS
(
select
h1b.Employer,
SUM(h1b.Initial_Approvals) + SUM(h1b.Continuing_Approvals) AS approvals_pre_2016,
SUM(h1b.Initial_Denials) + SUM(h1b.Continuing_Denials) AS denials_pre_2016,
(CAST(SUM(h1b.Initial_Denials) AS REAL) + CAST(SUM(h1b.Continuing_Denials) AS REAL)) / (CAST(SUM(h1b.Initial_Denials) AS REAL) + CAST(SUM(h1b.Continuing_Denials) AS REAL)+CAST(SUM(h1b.Initial_Approvals) AS REAL) + CAST(SUM(h1b.Continuing_Approvals) AS REAL))*100 AS denial_pre_2016,
h1b2.Employer,
h1b2.approvals_post_2016,
h1b2.denials_post_2016,
h1b2.denial_post_2016
from h1b LEFT JOIN (
select
Employer,
SUM(Initial_Approvals) + SUM(Continuing_Approvals) AS approvals_post_2016,
SUM(Initial_Denials) + SUM(Continuing_Denials) AS denials_post_2016,
(CAST(SUM(Initial_Denials) AS REAL) + CAST(SUM(Continuing_Denials) AS REAL)) / (CAST(SUM(Initial_Denials) AS REAL) + CAST(SUM(Continuing_Denials) AS REAL)+CAST(SUM(Initial_Approvals) AS REAL) + CAST(SUM(Continuing_Approvals) AS REAL))*100 AS denial_post_2016,
Fiscal_Year
from h1b
where Fiscal_Year !='2019' and Fiscal_Year>2016
group by Employer
) h1b2 ON h1b.Employer = h1b2.Employer
where h1b.Fiscal_Year !='2019' and h1b.Fiscal_Year<=2016
group by h1b.Employer
), fiscal_count as
(
select
Employer, count(DISTINCT h1b.Fiscal_Year) Fiscal_Year
from h1b
where h1b.Fiscal_Year !='2019'
group by 1
having count(DISTINCT h1b.Fiscal_Year)>9
)
select
hs.Employer,
fc.Fiscal_Year,
hs.denial_pre_2016 AS denial_rate_pre_2016,
hs.denial_post_2016 AS denial_rate_post_2016,
hs.denial_post_2016 - hs.denial_pre_2016 AS delta_denial_rates_pre_post2016
from h1b_table_by_state hs join fiscal_count fc on hs.Employer=fc.Employer
order by 4 desc
;
'''
pandas_compare_query = pd.read_sql_query(h1b_query21a, conn)
pandas_compare_query.to_csv("/Users/ankitkothari/Documents/dash-app/pandas_compare_query1.csv")
pandas_compare_query.shape
pandas_comparison_query=pandas_filter_query.merge(pandas_compare_query, how='left', left_on='Employer', right_on='Employer')
pandas_comparison_query.shape
###Output
_____no_output_____
###Markdown
Plotting how you compare pre- and post-2016 with other Employers
###Code
@app.callback(
dash.dependencies.Output('bar-graph-3', 'figure'),
[dash.dependencies.Input('employer', 'value')]
)
def compare_plot(employer):
try:
employer=employer.upper()
except:
employer=None
if employer is None:
employer='APPLE'
companies=["{}".format(employer),"APPLE","FACEBOOK","AMAZON","MICROSOFT","GOOGLE","TATA", "ACCENTURE", "WIPRO","CAPGEMINI","MINDTREE"]
print(companies)
df21=pandas_comparison_query
df21['companies']= df21['Employer'].apply(lambda x: "US_COMPANY" if x in companies else "NA")
df21=df21[df21['companies'] != "NA"]
df21=df21.sort_values(by=['denial_rate_post_2016'], ascending=True)
df21[['denial_rate_pre_2016','denial_rate_post_2016','delta_denial_rates_pre_post2016']]=df21[['denial_rate_pre_2016','denial_rate_post_2016','delta_denial_rates_pre_post2016']].apply(lambda x: round(x,2))
fig = go.Figure()
print(df21)
y1=[str(x) for x in df21['denial_rate_pre_2016']]
y2=[str(x) for x in df21['denial_rate_post_2016']]
#fig.add_trace(go.Bar(x=df20.Fiscal_Year , y=df20.Approvals, mode='markers+lines', name='JOB TIME', line=dict(color='#e4bd0b', width=2)))
fig.add_trace(go.Bar(y=[x for x in df21.Employer] , x=df21.denial_rate_pre_2016,marker_color='#2677bb',orientation='h', name='Denial Rate Pre 2016', text=y1,
textposition='outside'))
fig.add_trace(go.Bar(y=[x for x in df21.Employer] , x=df21.denial_rate_post_2016,marker_color='#bfbabe',orientation='h', name='Denial Rate Post 2016',text=y2,
textposition='outside'))
#fig.add_trace(go.Scatter(x=[x for x in df20.Fiscal_Year], y=df20.Denials, mode='lines', name='Count of Denials', yaxis="y2", line=dict(color='#bfbabe', width=4)))
fig.update_layout(
plot_bgcolor='rgba(0,0,0,0)'
)
fig.update_xaxes(
tickangle = 0,
tickfont=dict(family='Rockwell', color='#151515', size=16))
fig.update_xaxes(
dtick=2, showgrid=False
)
fig.update_yaxes(
dtick=1,showgrid=False
)
fig.update_yaxes(ticks="outside", tickwidth=3, tickcolor='#e0e5db', ticklen=12)
fig.update_layout(title="How you compare with other Employers?",
legend=dict(x=.73,y=0.78, traceorder='reversed', font_size=12),
width=600,
height=600,
yaxis=dict(
title="",
titlefont=dict(
color="#151515"
),
tickfont=dict(
color="#151515"
)
),
xaxis=dict(title="% Denial Rate",titlefont=dict(color="#151515"),
tickfont=dict(color="#151515")),)
return fig
###Output
_____no_output_____
###Markdown
Query: distribution of approved visas across states
###Code
h1b_query35 = '''
select
h1b.State,
h1b.Employer,
SUM(h1b.Initial_Approvals) + SUM(h1b.Continuing_Approvals) AS total_visas_State
from h1b
where h1b.Fiscal_Year !='2019' and h1b.Employer in (
select
h1b.Employer
from h1b left join
(
select
distinct Employer,
SUM(Initial_Denials) + SUM(Continuing_Denials) Denials,
count(DISTINCT Fiscal_Year) Fiscal_Year,
SUM(h1b.Initial_Approvals)+ SUM(h1b.Continuing_Approvals) Approvals
from h1b
where h1b.Fiscal_Year !='2019'
group by 1
) h1b2 on h1b.Employer = h1b2.Employer
group by 1
having h1b2.Fiscal_Year>9 and h1b2.Denials>2 and h1b2.Approvals >50)
group by 2,1
;'''
map_query = pd.read_sql_query(h1b_query35, conn)
map_query.to_csv("/Users/ankitkothari/Documents/dash-app/map_query1.csv")
map_query['total_visas_State']=map_query['total_visas_State'].astype(float)
map_query[map_query['Employer']=='ACCEL NORTH AMERICA INC']
###Output
_____no_output_____
###Markdown
Plotting the distribution of approved visas across states
###Code
@app.callback(
dash.dependencies.Output('map-graph', 'figure'),
[dash.dependencies.Input('employer', 'value')]
)
def update_graph(employer):
try:
employer=employer.upper()
except:
employer=None
if employer is None:
employer='APPLE'
df35 = map_query[map_query['Employer']==employer]
print(df35)
df35=df35.sort_values(by='total_visas_State', ascending=False)
df35=df35.dropna(how='any')
colors = ["#2677bb" if x < 1000 else '#bfbabe' if x<=10000 else '#007500' for x in df35['total_visas_State']]
print(colors)
fig = go.Figure(data=go.Choropleth(
locations=df35['State'], # Spatial coordinates, # Data to be color-coded
locationmode = 'USA-states', # set of locations match entries in `locations`
z = df35['total_visas_State'].astype(float),
showscale=False,
colorbar = dict(showticklabels=False),
colorscale = colors
,
))
fig.update_layout(
        title_text = 'Approved H1B Applications in the US by State for {}'.format(employer),
        geo_scope='usa', # limit map scope to USA
)
return fig
if __name__ == '__main__':
app.run_server()
###Output
* Serving Flask app "__main__" (lazy loading)
* Environment: production
WARNING: Do not use the development server in a production environment.
Use a production WSGI server instead.
* Debug mode: off
|
Udemy/Python for Data Science With Real Exercises/Basketball/Free Throws - Challenge/Free Throws.ipynb | ###Markdown
Section 4 Homework data. Dear Student, welcome to the dataset for the homework exercise. **Instructions for this dataset:** You have only been supplied vectors. You will need to create the matrices yourself. Matrices: FreeThrows, FreeThrowAttempts. Sincerely, Kirill Eremenko, [Super Data Science](http://www.superdatascience.com). Copyright: These datasets were prepared using publicly available data. However, these scripts are subject to Copyright Laws. If you wish to use these R scripts outside of the R Programming Course by Kirill Eremenko, you may do so by referencing www.superdatascience.com in your work. *Comments:* Seasons are labeled based on the first year in the season, e.g. the 2012-2013 season is presented as simply 2012. Notes and Corrections to the data: Kevin Durant: 2006 - College Data Used; Kevin Durant: 2005 - Proxied With 2006 Data; Derrick Rose: 2012 - Did Not Play; Derrick Rose: 2007 - College Data Used; Derrick Rose: 2006 - Proxied With 2007 Data; Derrick Rose: 2005 - Proxied With 2007 Data.
###Code
#Seasons
Seasons = ["2005","2006","2007","2008","2009","2010","2011","2012","2013","2014"]
#Players
Players = ["KobeBryant","JoeJohnson","LeBronJames","CarmeloAnthony","DwightHoward","ChrisBosh","ChrisPaul","KevinDurant","DerrickRose","DwayneWade"]
#Free Throws
KobeBryant_FT = [696,667,623,483,439,483,381,525,18,196]
JoeJohnson_FT = [261,235,316,299,220,195,158,132,159,141]
LeBronJames_FT = [601,489,549,594,593,503,387,403,439,375]
CarmeloAnthony_FT = [573,459,464,371,508,507,295,425,459,189]
DwightHoward_FT = [356,390,529,504,483,546,281,355,349,143]
ChrisBosh_FT = [474,463,472,504,470,384,229,241,223,179]
ChrisPaul_FT = [394,292,332,455,161,337,260,286,295,289]
KevinDurant_FT = [209,209,391,452,756,594,431,679,703,146]
DerrickRose_FT = [146,146,146,197,259,476,194,0,27,152]
DwayneWade_FT = [629,432,354,590,534,494,235,308,189,284]
#Matrix
#
# <put your code here>
#
#Free Throw Attempts
KobeBryant_FTA = [819,768,742,564,541,583,451,626,21,241]
JoeJohnson_FTA = [330,314,379,362,269,243,186,161,195,176]
LeBronJames_FTA = [814,701,771,762,773,663,502,535,585,528]
CarmeloAnthony_FTA = [709,568,590,468,612,605,367,512,541,237]
DwightHoward_FTA = [598,666,897,849,816,916,572,721,638,271]
ChrisBosh_FTA = [581,590,559,617,590,471,279,302,272,232]
ChrisPaul_FTA = [465,357,390,524,190,384,302,323,345,321]
KevinDurant_FTA = [256,256,448,524,840,675,501,750,805,171]
DerrickRose_FTA = [205,205,205,250,338,555,239,0,32,187]
DwayneWade_FTA = [803,535,467,771,702,652,297,425,258,370]
#Matrix
#
# <put your code here>
#
import numpy as np
import matplotlib.pyplot as plt
Sdict = {"2005":0,"2006":1,"2007":2,"2008":3,"2009":4,"2010":5,"2011":6,"2012":7,"2013":8,"2014":9}
Pdict = {"KobeBryant":0,"JoeJohnson":1,"LeBronJames":2,"CarmeloAnthony":3,"DwightHoward":4,"ChrisBosh":5,"ChrisPaul":6,"KevinDurant":7,"DerrickRose":8,"DwayneWade":9}
###Output
_____no_output_____
###Markdown
Creating the Matrix
###Code
# Matrix for the free throws
FreeThrows = np.array([KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT])
# We don't need the vectors anymore
del (KobeBryant_FT, JoeJohnson_FT, LeBronJames_FT, CarmeloAnthony_FT, DwightHoward_FT, ChrisBosh_FT, ChrisPaul_FT, KevinDurant_FT, DerrickRose_FT, DwayneWade_FT)
# Matrix for the free throws attempts
FreeThrowAttempts = np.array([KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA])
# We don't need the vectors anymore
del (KobeBryant_FTA, JoeJohnson_FTA, LeBronJames_FTA, CarmeloAnthony_FTA, DwightHoward_FTA, ChrisBosh_FTA, ChrisPaul_FTA, KevinDurant_FTA, DerrickRose_FTA, DwayneWade_FTA)
# Checking the FreeThrows Matrix
FreeThrows
# Checking the FreeThrowAttempts Matrix
FreeThrowAttempts
def myplot(data, playerlist=Players):
colors = {"KobeBryant":"Black","JoeJohnson":"Red","LeBronJames":"Yellow","CarmeloAnthony":"Green","DwightHoward":"Blue","ChrisBosh":"Magenta","ChrisPaul":"Gray","KevinDurant":"orange","DerrickRose":"brown","DwayneWade":"olive"}
mrkers = {"KobeBryant":"*","JoeJohnson":".","LeBronJames":",","CarmeloAnthony":"o","DwightHoward":"v","ChrisBosh":"<","ChrisPaul":">","KevinDurant":"^","DerrickRose":"s","DwayneWade":"p"}
for name in playerlist:
plt.plot(data[Pdict[name]], c= colors[name], ls = '--', marker = mrkers[name], ms = 7, label = name)
plt.legend(loc='upper left', bbox_to_anchor = (1,1))
plt.xticks( list(range(0,10)), Seasons, rotation = 'vertical')
plt.show()
myplot(FreeThrows)
myplot(FreeThrowAttempts)
###Output
_____no_output_____
###Markdown
Part 1 - Free Throw Attempts per game
###Code
#Games
KobeBryant_G = [80,77,82,82,73,82,58,78,6,35]
JoeJohnson_G = [82,57,82,79,76,72,60,72,79,80]
LeBronJames_G = [79,78,75,81,76,79,62,76,77,69]
CarmeloAnthony_G = [80,65,77,66,69,77,55,67,77,40]
DwightHoward_G = [82,82,82,79,82,78,54,76,71,41]
ChrisBosh_G = [70,69,67,77,70,77,57,74,79,44]
ChrisPaul_G = [78,64,80,78,45,80,60,70,62,82]
KevinDurant_G = [35,35,80,74,82,78,66,81,81,27]
DerrickRose_G = [40,40,40,81,78,81,39,0,10,51]
DwayneWade_G = [75,51,51,79,77,76,49,69,54,62]
#Matrix
Games = np.array([KobeBryant_G, JoeJohnson_G, LeBronJames_G, CarmeloAnthony_G, DwightHoward_G, ChrisBosh_G, ChrisPaul_G, KevinDurant_G, DerrickRose_G, DwayneWade_G])
myplot(FreeThrowAttempts / Games)
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: RuntimeWarning: invalid value encountered in true_divide
"""Entry point for launching an IPython kernel.
###Markdown
Chris Paul has very few attempts per game. Part 2 - Free Throw Accuracy
###Code
myplot(FreeThrows / FreeThrowAttempts)
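# (optional sketch, not part of the original exercise) career free-throw accuracy per player,
# to quantify the observations in the next cell; reuses the FreeThrows/FreeThrowAttempts matrices
ft_accuracy = FreeThrows.sum(axis=1) / FreeThrowAttempts.sum(axis=1)
for name, acc in sorted(zip(Players, ft_accuracy), key=lambda t: -t[1]):
    print('{:<16s} {:.3f}'.format(name, acc))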
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: RuntimeWarning: invalid value encountered in true_divide
"""Entry point for launching an IPython kernel.
###Markdown
- Dwight Howard's accuracy is very mediocre in comparison with other players. - Chris Paul's accuracy is one of the highest.
###Code
#Field Goals
KobeBryant_FG = [978,813,775,800,716,740,574,738,31,266]
JoeJohnson_FG = [632,536,647,620,635,514,423,445,462,446]
LeBronJames_FG = [875,772,794,789,768,758,621,765,767,624]
CarmeloAnthony_FG = [756,691,728,535,688,684,441,669,743,358]
DwightHoward_FG = [468,526,583,560,510,619,416,470,473,251]
ChrisBosh_FG = [549,543,507,615,600,524,393,485,492,343]
ChrisPaul_FG = [407,381,630,631,314,430,425,412,406,568]
KevinDurant_FG = [306,306,587,661,794,711,643,731,849,238]
DerrickRose_FG = [208,208,208,574,672,711,302,0,58,338]
DwayneWade_FG = [699,472,439,854,719,692,416,569,415,509]
#Matrix
FieldGoals = np.array([KobeBryant_FG, JoeJohnson_FG, LeBronJames_FG, CarmeloAnthony_FG, DwightHoward_FG, ChrisBosh_FG, ChrisPaul_FG, KevinDurant_FG, DerrickRose_FG, DwayneWade_FG])
#Field Goal Attempts
KobeBryant_FGA = [2173,1757,1690,1712,1569,1639,1336,1595,73,713]
JoeJohnson_FGA = [1395,1139,1497,1420,1386,1161,931,1052,1018,1025]
LeBronJames_FGA = [1823,1621,1642,1613,1528,1485,1169,1354,1353,1279]
CarmeloAnthony_FGA = [1572,1453,1481,1207,1502,1503,1025,1489,1643,806]
DwightHoward_FGA = [881,873,974,979,834,1044,726,813,800,423]
ChrisBosh_FGA = [1087,1094,1027,1263,1158,1056,807,907,953,745]
ChrisPaul_FGA = [947,871,1291,1255,637,928,890,856,870,1170]
KevinDurant_FGA = [647,647,1366,1390,1668,1538,1297,1433,1688,467]
DerrickRose_FGA = [436,436,436,1208,1373,1597,695,0,164,835]
DwayneWade_FGA = [1413,962,937,1739,1511,1384,837,1093,761,1084]
#Matrix
FieldGoalAttempts = np.array([KobeBryant_FGA, JoeJohnson_FGA, LeBronJames_FGA, CarmeloAnthony_FGA, DwightHoward_FGA, ChrisBosh_FGA, ChrisPaul_FGA, KevinDurant_FGA, DerrickRose_FGA, DwayneWade_FGA])
myplot(FieldGoals / FieldGoalAttempts)
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: RuntimeWarning: invalid value encountered in true_divide
"""Entry point for launching an IPython kernel.
###Markdown
Part 3 - Player playing style (2 vs 3 points preference) excluding Free Throws
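Roughly speaking, the metric plotted below is points per made field goal, $\frac{\text{PTS} - \text{FT}}{\text{FG}}$. Since every made field goal is worth either 2 or 3 points, the ratio lies between 2 and 3; values closer to 3 indicate a stronger three-point preference.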
###Code
#Points
KobeBryant_PTS = [2832,2430,2323,2201,1970,2078,1616,2133,83,782]
JoeJohnson_PTS = [1653,1426,1779,1688,1619,1312,1129,1170,1245,1154]
LeBronJames_PTS = [2478,2132,2250,2304,2258,2111,1683,2036,2089,1743]
CarmeloAnthony_PTS = [2122,1881,1978,1504,1943,1970,1245,1920,2112,966]
DwightHoward_PTS = [1292,1443,1695,1624,1503,1784,1113,1296,1297,646]
ChrisBosh_PTS = [1572,1561,1496,1746,1678,1438,1025,1232,1281,928]
ChrisPaul_PTS = [1258,1104,1684,1781,841,1268,1189,1186,1185,1564]
KevinDurant_PTS = [903,903,1624,1871,2472,2161,1850,2280,2593,686]
DerrickRose_PTS = [597,597,597,1361,1619,2026,852,0,159,904]
DwayneWade_PTS = [2040,1397,1254,2386,2045,1941,1082,1463,1028,1331]
#Matrix
Points = np.array([KobeBryant_PTS, JoeJohnson_PTS, LeBronJames_PTS, CarmeloAnthony_PTS, DwightHoward_PTS, ChrisBosh_PTS, ChrisPaul_PTS, KevinDurant_PTS, DerrickRose_PTS, DwayneWade_PTS])
myplot((Points - FreeThrows) / FieldGoals)
###Output
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: RuntimeWarning: invalid value encountered in true_divide
"""Entry point for launching an IPython kernel.
|
notebooks Python/chap1_OLS.ipynb | ###Markdown
Chapter 1 - Ordinary Least Squares (OLS) Table of Contents1.2.3 Simulation of the causal effect1.2.4 Averaging to Estimate the Causal Effect1.3.2 Algebraic OLS estimator in Python1.3.4 Multiplying matrices in Python1.3.6 Matrix estimator of OLS in Python1.4.3 Estimating least squares in Python1.4.4 Linear regression in Pythonscipy solutionsklearn solutionstatsmodels solution1.5.1. Data simulations1.5.3 Bootstrap in Python1.6.2 NLSM data1.6.3 Plotting returns to schooling1.6.4 Estimating returns to schooling 1.2.3 Simulation of the causal effect
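As a brief reminder of the setup assumed by the code below: we simulate $y_i = a + b x_i + u_i$ with $a = 2$, $b = 3$, $x_i \sim U(0,1)$ and $u_i \sim N(0,1)$.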
###Code
# setting the seed in Numpy
# as the functions used to generate seeds differ between Python and R,
# (and actually even between plain Python and Numpy)
# we will generate different random data than in the book.
import numpy as np
np.random.seed(123456789)
# main parameters of interest
N = 100
a = 2
b = 3
# create a vector where the observed characteristic, x, is drawn
# from a uniform distribution
x = np.random.rand(N)
# create a vector for the unobserved characteristic, u, from
# a standard normal distribution
u = np.random.normal(size = N)
# create a vector y
y = a + b*x + u
###Output
_____no_output_____
###Markdown
1.2.4 Averaging to Estimate the Causal Effect
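The idea, sketched informally: since $y = a + bx + u$ with $u$ independent of $x$, we have $E[y \mid x \approx 1] - E[y \mid x \approx 0] \approx b$, so comparing average outcomes at the two extremes of $x$ should give a number close to $b = 3$.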
###Code
# as there is no simple way to plot a linear function in Python,
# we create a dedicated function for it
# credits to David Marx https://stackoverflow.com/questions/7941226/how-to-add-line-based-on-slope-and-intercept-in-matplotlib/43811762
def abline(slope, intercept):
"""Plot a linear function from slope and intercept"""
axes = plt.gca()
x_vals = np.array(axes.get_xlim())
y_vals = intercept + slope * x_vals
plt.plot(x_vals, y_vals, color='dimgray')
import matplotlib.pyplot as plt
# plotting with an R-looking plotting style
plt.style.use('seaborn-white')
plt.scatter(x,y, facecolors='none', color = 'dimgray')
abline(intercept = 2, slope = 3)
# mean takes an average
# the logical expression inside the square brackets
# creates an index for the elements of y where the logical
# expression in x holds
# in Python mean can be found in the Numpy library
np.mean(y[x>0.95])-np.mean(y[x<0.05])
###Output
_____no_output_____
###Markdown
1.3.2 Algebraic OLS estimator in Python
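A quick derivation of the estimator used in the next cell: taking averages of $y = 2 + bx + u$ and treating the average of $u$ as approximately zero gives $\bar y \approx 2 + b \bar x$, hence $\hat b = (\bar y - 2)/\bar x$.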
###Code
b_hat = (np.mean(y)-2)/np.mean(x)
b_hat
###Output
_____no_output_____
###Markdown
1.3.4 Multiplying matrices in Python
###Code
x1 = x[0:5] # python start counting at 0 and not at 1 like R
# concatenating the two vectors in a matrix
X1 = np.c_[np.ones(5),x1]
# predicts value of y using the model
X1.dot(np.array([2,3]))
# [2,3] has to be passed through the array() function to get
# recognised as a matrix
# which we can compare to the true values
y[0:5]
###Output
_____no_output_____
###Markdown
1.3.6 Matrix estimator of OLS in Python
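For reference, the matrix form computed step by step below is the standard OLS estimator $\hat\beta = (X^\top X)^{-1} X^\top y$.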
###Code
X = np.c_[np.ones(N),x]
# creation of a matrix
A = np.matrix('1 4; 2 5;3 6') # or np.c_[range(1,4), range(4,7)]
# transpose of A
np.transpose(A)
# multiplication of the transpose by itself
np.transpose(A).dot(A)
# in our problem
np.transpose(X).dot(X)
# in python the inverse matrix can be found using
# the inv() function (from Numpy)
np.linalg.inv( np.transpose(X).dot(X) )
beta_hat = np.linalg.inv(np.transpose(X).dot(X)).dot(np.transpose(X)).dot(y)
beta_hat
# we averaged over the unobserved term to get something close to 0.
np.linalg.inv(np.transpose(X).dot(X)).dot(np.transpose(X)).dot(u)
###Output
_____no_output_____
###Markdown
1.4.3 Estimating least squares in Python
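With the intercept fixed at 2, the objective minimized below is $S(b) = \sum_i (y_i - 2 - b x_i)^2$. Setting $S'(b) = -2\sum_i x_i (y_i - 2 - b x_i) = 0$ gives the closed form $\hat b = \frac{\frac{1}{N}\sum_i x_i y_i - 2\bar x}{\frac{1}{N}\sum_i x_i^2}$, which is exactly the expression evaluated at the end of the next cell.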
###Code
from scipy.optimize import minimize
def f(b):
"The objective function - sum of squared difference function"
return sum((y-2-b*x)**2)
# the minimize function needs an initial guess
x0 = 1
# we search in the real line from -10 to 10
bounds = [(-10,10)]
# minimizing the objective function
minimize(f, x0, bounds = bounds)
# alternatively using the first order condition
(np.mean(x*y)-2*np.mean(x))/np.mean(x*x)
###Output
_____no_output_____
###Markdown
1.4.4 Linear regression in Python
###Code
import pandas as pd
data1 = pd.DataFrame(np.c_[y,x])
data1.columns=['y','x']
data1
###Output
_____no_output_____
###Markdown
scipy solution
###Code
from scipy import stats
stats.linregress(x,y)
###Output
_____no_output_____
###Markdown
sklearn solution
###Code
from sklearn import linear_model
# create linear regression object
regr = linear_model.LinearRegression()
# fit
regr.fit(x.reshape(-1, 1), y)
# since x is one-dimensional, the sklearn API requires
# reshaping it before using the fit() function
print('Coefficients: \n', regr.coef_)
print('Intercept: \n', regr.intercept_)
###Output
Coefficients:
[3.04630362]
Intercept:
1.898492537052036
###Markdown
statsmodels solution
###Code
import statsmodels.api as sm
# we add an intercept
X = sm.add_constant(x)
mod = sm.OLS(y,X)
res = mod.fit()
res.summary()
# result from the matrix algebra
np.transpose(beta_hat)
###Output
_____no_output_____
###Markdown
1.5.1. Data simulations
###Code
np.random.seed(123456789)
K = 1000
# create an empty list to fill with the results of the
# data simulation
l = []
for k in range(0,K):
x = np.random.rand(N)
u = np.random.normal(size = N)
y = a + b*x + u
regr = linear_model.LinearRegression()
regr.fit(x.reshape(-1, 1), y)
l.append([regr.intercept_, regr.coef_[0]])
# as we have only one coefficient we access it using
# regr.coef_[0]] - first element of a list of 1 coeff
# stacking all the results in a single dataframe
sim_res = pd.DataFrame(l)
# name the columns of the result matrix
sim_res.columns = ['Est. of a', 'Est. of b']
sim_res.describe()
###Output
_____no_output_____
###Markdown
1.5.3 Bootstrap in Python
###Code
np.random.seed(123456789)
K = 1000
l = []
for k in range(0,K):
    #index_k = np.random.randint(N+1) # again, Python start counting at 0
    data_k = data1.sample(N, replace = True) # bootstrap resample of the same size as the data
regr = linear_model.LinearRegression()
regr.fit(np.array(data_k['x']).reshape(-1, 1), data_k['y'])
l.append([regr.intercept_, regr.coef_[0]])
# stacking all the results in a single dataframe
sim_res = pd.DataFrame(l)
# name the columns of the result matrix
sim_res.columns = ['Est. of a', 'Est. of b']
# bootstrap estimates from the simulation
tab_res = pd.DataFrame()
tab_res['Mean'] = np.mean(sim_res)
tab_res['SD'] = np.std(sim_res)
tab_res['2.5%'] = sim_res.quantile(0.025)
tab_res['97.5%'] = sim_res.quantile(0.975)
tab_res
# the standard errors can be found in the statsmodel solution
###Output
_____no_output_____
###Markdown
1.6.2 NLSM data
###Code
df = pd.read_csv("../data/nls.csv") # convention to name any dataset as df in Python
# converting two variables as numbers, errors are coerced into NAs
df['wage76'] = df['wage76'].apply(pd.to_numeric, errors='coerce')
df['lwage76'] = df['lwage76'].apply(pd.to_numeric, errors='coerce')
# create a new dataset with missing values removed
df1 = df[df['lwage76'].isna()==False]
###Output
_____no_output_____
###Markdown
1.6.3 Plotting returns to schooling
###Code
# create linear regression object
regr = linear_model.LinearRegression()
# fit (note: despite the variable names, lwage76 is the target and ed76 the regressor)
x = df1['lwage76']
y = np.array(df1['ed76']).reshape(-1, 1) # need to change the datatype
regr.fit(y, x)
# plotting the dots
plt.scatter(df1['ed76'],
df1['lwage76'],
facecolors='none',
color = 'dimgray')
# and the obtained regression line
abline(intercept = regr.intercept_, slope = regr.coef_[0])
###Output
_____no_output_____
###Markdown
1.6.4 Estimating returns to schooling
###Code
# unfortunately there is no built-in option in sklearn to get a statistical
# table similar to the R output.
# here is the result with statsmodels,
# which in addition has an interface very similar to R's:
from statsmodels.formula.api import ols
result = ols(formula = 'lwage76 ~ ed76', data = df).fit()
result.summary()
# predicted percentage increase in wages for one year of schooling
np.exp(np.log(np.mean(df1['wage76']))+regr.coef_[0])/np.mean(df1)['wage76']
###Output
_____no_output_____ |
src/Eval_fp16.ipynb | ###Markdown
Plotting Figures for 16-bit FL and HFL
###Code
import pickle
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
MNIST MLP IID
###Code
# ===== MNIST MLP IID =====
datamodelset = "MNIST_MLP_IID"
filename1 = "FL_mnist_mlp_468_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
filename2 = "HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
filename3 = "HFL4_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
filename4 = "HFL4_mnist_mlp_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
with open(r"../save/objects_fp16/" + filename1 + ".pkl", "rb") as input_file: data1 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename2 + ".pkl", "rb") as input_file: data2 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename3 + ".pkl", "rb") as input_file: data3 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename4 + ".pkl", "rb") as input_file: data4 = pickle.load(input_file)
trloss1 = data1[0]
trloss2 = data2[0]
trloss3 = data3[0]
trloss4 = data4[0]
tracc1 = data1[1]
tracc2 = data2[1]
tracc3 = data3[1]
tracc4 = data4[1]
# Plot Average Accuracy vs Communication rounds
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(tracc1)), tracc1, label="FL", linewidth=0.9)
plt.plot(range(len(tracc2)), tracc2, label="HFL2", linewidth=0.9)
plt.plot(range(len(tracc3)), tracc3, label="HFL4", linewidth=0.9)
plt.plot(range(len(tracc4)), tracc4, label="HFL8")
plt.legend(loc="lower right")
plt.ylabel('Average Accuracy')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_acc_FP16.png')
# Plot Loss curve
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(trloss1)), trloss1, label="FL", linewidth=0.9)
plt.plot(range(len(trloss2)), trloss2, label="HFL2", linewidth=0.9)
plt.plot(range(len(trloss3)), trloss3, label="HFL4", linewidth=0.9)
plt.plot(range(len(trloss4)), trloss4, label="HFL8")
plt.legend(loc="upper right")
plt.ylabel('Training loss')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_loss_FP16.png')
plt.show
# ===== MNIST MLP NON-IID =====
datamodelset = "MNIST_MLP_NONIID"
filename1 = "FL_mnist_mlp_1196_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
filename2 = "HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
filename3 = "HFL4_mnist_mlp_150_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
filename4 = "HFL8_mnist_mlp_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
with open(r"../save/objects_fp16/" + filename1 + ".pkl", "rb") as input_file: data1 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename2 + ".pkl", "rb") as input_file: data2 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename3 + ".pkl", "rb") as input_file: data3 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename4 + ".pkl", "rb") as input_file: data4 = pickle.load(input_file)
trloss1 = data1[0]
trloss2 = data2[0]
trloss3 = data3[0]
trloss4 = data4[0]
tracc1 = data1[1]
tracc2 = data2[1]
tracc3 = data3[1]
tracc4 = data4[1]
# Plot Average Accuracy vs Communication rounds
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(tracc1)), tracc1, label="FL", linewidth=0.9)
plt.plot(range(len(tracc2)), tracc2, label="HFL2", linewidth=0.9)
plt.plot(range(len(tracc3)), tracc3, label="HFL4", linewidth=0.9)
plt.plot(range(len(tracc4)), tracc4, label="HFL8")
plt.legend(loc="lower right")
plt.ylabel('Average Accuracy')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_acc_FP16.png')
# Plot Loss curve
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(trloss1)), trloss1, label="FL", linewidth=0.9)
plt.plot(range(len(trloss2)), trloss2, label="HFL2", linewidth=0.9)
plt.plot(range(len(trloss3)), trloss3, label="HFL4", linewidth=0.9)
plt.plot(range(len(trloss4)), trloss4, label="HFL8")
plt.legend(loc="upper right")
plt.ylabel('Training loss')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_loss_FP16.png')
plt.show
# ===== MNIST CNN IID =====
datamodelset = "MNIST_CNN_IID"
filename1 = "FL_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
filename2 = "HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
filename3 = "HFL4_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
filename4 = "HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
with open(r"../save/objects_fp16/" + filename1 + ".pkl", "rb") as input_file: data1 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename2 + ".pkl", "rb") as input_file: data2 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename3 + ".pkl", "rb") as input_file: data3 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename4 + ".pkl", "rb") as input_file: data4 = pickle.load(input_file)
trloss1 = data1[0]
trloss2 = data2[0]
trloss3 = data3[0]
trloss4 = data4[0]
tracc1 = data1[1]
tracc2 = data2[1]
tracc3 = data3[1]
tracc4 = data4[1]
# Plot Average Accuracy vs Communication rounds
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(tracc1)), tracc1, label="FL", linewidth=0.9)
plt.plot(range(len(tracc2)), tracc2, label="HFL2", linewidth=0.9)
plt.plot(range(len(tracc3)), tracc3, label="HFL4", linewidth=0.9)
plt.plot(range(len(tracc4)), tracc4, label="HFL8")
plt.legend(loc="lower right")
plt.ylabel('Average Accuracy')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_acc_FP16.png')
# Plot Loss curve
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(trloss1)), trloss1, label="FL", linewidth=0.9)
plt.plot(range(len(trloss2)), trloss2, label="HFL2", linewidth=0.9)
plt.plot(range(len(trloss3)), trloss3, label="HFL4", linewidth=0.9)
plt.plot(range(len(trloss4)), trloss4, label="HFL8")
plt.legend(loc="upper right")
plt.ylabel('Training loss')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_loss_FP16.png')
plt.show
# ===== MNIST CNN NON-IID =====
datamodelset = "MNIST_CNN_NONIID"
filename1 = "FL_mnist_cnn_261_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
filename2 = "HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
filename3 = "HFL4_mnist_cnn_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
filename4 = "HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
with open(r"../save/objects_fp16/" + filename1 + ".pkl", "rb") as input_file: data1 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename2 + ".pkl", "rb") as input_file: data2 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename3 + ".pkl", "rb") as input_file: data3 = pickle.load(input_file)
with open(r"../save/objects_fp16/" + filename4 + ".pkl", "rb") as input_file: data4 = pickle.load(input_file)
trloss1 = data1[0]
trloss2 = data2[0]
trloss3 = data3[0]
trloss4 = data4[0]
tracc1 = data1[1]
tracc2 = data2[1]
tracc3 = data3[1]
tracc4 = data4[1]
# Plot Average Accuracy vs Communication rounds
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(tracc1)), tracc1, label="FL", linewidth=0.9)
plt.plot(range(len(tracc2)), tracc2, label="HFL2", linewidth=0.9)
plt.plot(range(len(tracc3)), tracc3, label="HFL4", linewidth=0.9)
plt.plot(range(len(tracc4)), tracc4, label="HFL8")
plt.legend(loc="lower right")
plt.ylabel('Average Accuracy')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_acc_FP16.png')
# Plot Loss curve
plt.figure()
plt.title(datamodelset)
plt.plot(range(len(trloss1)), trloss1, label="FL", linewidth=0.9)
plt.plot(range(len(trloss2)), trloss2, label="HFL2", linewidth=0.9)
plt.plot(range(len(trloss3)), trloss3, label="HFL4", linewidth=0.9)
plt.plot(range(len(trloss4)), trloss4, label="HFL8")
plt.legend(loc="upper right")
plt.ylabel('Training loss')
plt.xlabel('Communication Rounds')
plt.savefig('../save/' + datamodelset + '_loss_FP16.png')
plt.show
###Output
_____no_output_____
###Markdown
Function to find out the number of communication rounds needed to exceed a certain prediction accuracy.
###Code
import pickle
##### CIFAR
#filename1 = "FL_cifar_cnn_500_lr[0.01]_C[0.1]_iid[1]_E[5]_B[50]_FP16"
#filename1 = "HFL2_cifar_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[5]_B[50]_FP16"
#filename1 = "HFL4_cifar_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[5]_B[50]_FP16"
#filename1 = "HFL8_cifar_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[5]_B[50]_FP16"
##### MNIST_MLP_IID
#filename1 = "FL_mnist_mlp_650_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
#filename1 = "HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
#filename1 = "HFL4_mnist_mlp_150_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
#filename1 = "HFL8_mnist_mlp_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
##### MNIST_MLP_NON-IID
#filename1 = "FL_mnist_mlp_1196_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
#filename1 = "HFL2_mnist_mlp_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
#filename1 = "HFL4_mnist_mlp_150_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
#filename1 = "HFL8_mnist_mlp_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
##### MNUST_CNN_IID
filename1 = "FL_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
#filename1 = "HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
#filename1 = "HFL4_mnist_cnn_100_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
#filename1 = "HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[1]_E[1]_B[10]_FP16"
##### MNIST_CNN_NON-IID
#filename1 = "FL_mnist_cnn_261_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
#filename1 = "HFL2_mnist_cnn_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
#filename1 = "HFL4_mnist_cnn_100_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
#filename1 = "HFL8_mnist_cnn_30_lr[0.01]_C[0.1]_iid[0]_E[1]_B[10]_FP16"
with open(r"../save/objects_fp16/" + filename1 + ".pkl", "rb") as input_file: data = pickle.load(input_file)
trloss = data[0]
tracc = data[1]
# using enumerate() + next() to find index of first element just greater than a certain percentage
testacc = 0.97
res = next(x for x, val in enumerate(tracc) if val >= testacc)
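# note (optional safety tweak, not in the original): next(...) raises StopIteration if the
# target accuracy is never reached; a defensive variant would be
# res = next((x for x, val in enumerate(tracc) if val >= testacc), None)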
# printing result
print ("The number of global training round just greater than " + str(testacc*100) + "% : " + str(res+1))
###Output
The number of global training round just greater than 97.0% : 74
|
K-Means Clustering/K-Means Clustering - Principles and Custom Implementation.ipynb | ###Markdown
K-Means Clustering: Principles and Custom Implementation In this notebook, we will demonstrate, step by step, how the _K-Means Clustering_ algorithm works. For that purpose, we will use the _scikit-learn_ library to generate a simple artificial dataset. Our custom solution will be compared with _scikit-learn_ at the end of the notebook.
###Code
%pylab inline
from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
import numpy as np
import pandas as pd
###Output
Populating the interactive namespace from numpy and matplotlib
###Markdown
The dataset Although the math stays the same, the clustering algorithm may become less intuitive when the feature space is multidimensional. For this reason, we will generate a 2-dimensional dataset with normally distributed data around three random points, thus forming our _clusters_. All of our clusters will share the exact same standard deviation, so we are constructing a dataset that is a perfect case for the KMC algorithm, and it will be easier to explain.
###Code
N_FEATURES = 2
K_CLUSTERS = 3
data, targets = make_blobs(
n_samples=400,
n_features=N_FEATURES,
centers=K_CLUSTERS,
cluster_std=1.25,
shuffle=True,
random_state=0)
fig, ax = plt.subplots(figsize=(8, 6))
ax.scatter(data[:, 0], data[:, 1],
c='white', marker='o', edgecolor='black', s=20)
ax.set_xlabel('x0 [a.u.]')
ax.set_ylabel('x1 [a.u.]')
ax.set_title('Our custom dataset')
plt.show()
###Output
_____no_output_____
###Markdown
Here, we are deliberately ignoring the `target`s, as our problem categorizes itself as **unsupervised learning**, and as such they represent information that is unknown to us. The algorithm explained Distance First of all, the KMC algorithm (just like _K-Nearest Neighbours_) needs some way of evaluating the points' similarity. For that it requires a definition of _distance_, which we can implement using the Minkowski definition, which for $p=2$ becomes the standard Euclidean distance.$$\text{d}(x, y) = \left(\sum_{n = 0}^{N-1} |x_n - y_n|^p \right)^{1/p}$$, where $N$ is the number of features.
###Code
def distance(x, y, p=2):
return ((abs(x - y)**p).sum())**(1/p)
assert distance(np.array([0, 0]), np.array([0, 0])) == 0
assert distance(np.array([1, 0]), np.array([1, 0])) == 0
assert distance(np.array([1, 0]), np.array([0, 0])) == 1
assert distance(np.array([1, 0]), np.array([0, 1])) == np.sqrt(2)
###Output
_____no_output_____
###Markdown
Input formatting Before we begin, let's encapsulate our dataset in a _pandas_ object. Although [not as fast as numpy](https://zerowithdot.com/data-science-computation-harakiri/), it simplifies the analytics by giving us access to more methods.
###Code
FEATURES = ['x' + str(x) for x in range(N_FEATURES)]
X = pd.DataFrame(data, columns=FEATURES)
X.head()
###Output
_____no_output_____
###Markdown
Custom implementation
###Code
def km_clustering(X, K, max_iter=10, eps=1e-3):
Z = X.copy()
MAX_ITER = max_iter
EPSILON = eps
DISTANCES = ['d(x, c{})'.format(k) for k in range(K)]
# step 1. initialization
np.random.seed(1)
idx = np.random.randint(0, Z.shape[0], K)
centroids = X.iloc[idx, :].copy()
centroids = centroids.reset_index(drop='Index')
history = np.zeros((MAX_ITER, K, N_FEATURES))
history[0, :, :] = centroids
# step 4. repeat 2. and 3. until no more change
for i in range(1, MAX_ITER):
# step 2. evaluating minimum distance
for k in range(K):
centroid = centroids.iloc[k].to_numpy()
Z[DISTANCES[k]] = X.apply(lambda x: distance(x, centroid), axis=1)
belongs_to = 'cluster (i={})'.format(i)
Z[belongs_to] = Z[DISTANCES].idxmin(axis=1)
Z[belongs_to] = Z[belongs_to].apply(lambda x: np.argwhere(np.array(DISTANCES) == x)[0][0])
# step 3. calculating new centroids (shifting)
for k in range(K):
centroids.iloc[k] = Z[Z[belongs_to] == k][FEATURES].mean()
history[i, :, :] = centroids
if (abs(history[i-1] - centroids).max().max() < EPSILON):
break
return history, Z[FEATURES + [c for c in Z.columns if c.startswith('cluster')]]
###Output
_____no_output_____
###Markdown
Our function `km_clustering` accepts four inputs:* The dataset `X`, of which it creates a copy to ensure we do not alter the initial dataset,* The number of declared clusters `K`,* `max_iter` to terminate the procedure after this many iterations,* `eps`, our maximum tolerance for the so-called _inertia_, also used to terminate the algorithm.**Step 1.** is the _initialization_. Here, line 8 is optionally added to ensure the repeatability of the random number generator. Line 9 picks `K` random integers within the range of our example count. Line 10 then creates a small dataframe to hold our _centroids'_ coordinates, whose values are initialized using the randomly selected points. Then line 11 resets the index of the `centroids` to enumerate the coordinate vectors. Because we would like to demonstrate how the algorithm progresses, we create a snapshot of the `centroids` array at every iteration. We also preinitialize the `history` at the 0th iteration with the already selected centroids (line 14).**Step 2.** starts with the inner loop over the intended number of clusters `K`. For every centroid `k` (line 20), we measure the distance between it and every example point in our dataset (line 21), and save the result alongside our dataset copy `Z`. Then (22-23), we pick the minimum distance and look up the index of the cluster to associate the point with (24). This way, every data point has been given an additional index representing the cluster it is now a part of.**Step 3.** is evaluating the new centroids' coordinates. After the new clusters have been formed, the centroids can be redefined by taking the average of their points' coordinates. At this moment, we can also take a new snapshot (29) and check whether the centroids' positions have shifted beyond our targeted tolerance with respect to the last iteration (30); if not, our procedure is finished.**Step 4.** consecutively repeats steps 2. and 3. until either the maximum number of iterations is reached, or the changes to the centroids' positions become so small that it makes no sense to continue. Finally, the function returns both the appended dataset and the history of the centroids' positions. Visualizing progression Now, let's execute our `km_clustering` function and demonstrate how it operates. Note that we ask it to formulate three clusters, while we _know_ our dataset has three. This is a highly artificial situation.
###Code
import itertools
K = 3
h, Z = km_clustering(X, K)
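# (optional sketch) within-cluster sum of squares ("inertia") of the final assignment,
# i.e. the quantity K-Means implicitly minimises; reuses the h and Z returned above
final_i = len([col for col in Z.columns if col.startswith('cluster')])
labels = Z['cluster (i={})'.format(final_i)].to_numpy()
inertia = sum(((Z[FEATURES].to_numpy()[labels == k] - h[final_i, k, :])**2).sum()
              for k in range(K))
print('final inertia: {:.2f}'.format(inertia))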
ITERS = len([c for c in Z.columns if c.startswith('cluster')]) + 1
fig, axs = plt.subplots(ITERS, 1, figsize=(6, 32))
plt.tight_layout(w_pad=4, h_pad=4)
axs[0].scatter(data[:, 0], data[:, 1],
c='white', marker='o', edgecolor='black', s=20, alpha=0.5)
for k in range(K):
axs[0].scatter(h[0, k, 0], h[0, k, 1], c='k', marker='o', edgecolor='k', s=90)
axs[0].set_xlabel('x0 [a.u.]')
axs[0].set_ylabel('x1 [a.u.]')
axs[0].set_title('Iteration: 0')
for i in range(1, ITERS):
colors = itertools.cycle(['r', 'g', 'b', 'm', 'c', 'y'])
for k in range(K):
c = next(colors)
z = Z[Z['cluster (i={})'.format(i)] == k][FEATURES].to_numpy()
axs[i].scatter(z[:, 0], z[:, 1], c=c, marker='o', edgecolor='k', alpha=0.2)
axs[i].scatter(h[i-1, k, 0], h[i-1, k, 1], c=c, marker='x', edgecolor='k', s=90)
axs[i].scatter(h[i, k, 0], h[i, k, 1], c=c, marker='o', edgecolor='k', s=90)
axs[i].plot([h[i-1, k, 0], h[i, k, 0]], [h[i-1, k, 1], h[i, k, 1]], c=c)
axs[i].set_xlabel('x0 [a.u.]')
axs[i].set_ylabel('x1 [a.u.]')
axs[i].set_title('Iteration: {}'.format(i))
plt.show()
###Output
_____no_output_____
###Markdown
Looking at the figures above, we can observe that the centroids move less and less with every new iteration. Beyond the 6th iteration, the shift is so tiny that it makes sense to stop the computation. Scikit-Learn implementation Now, let's compare our result with the implementation offered by the _scikit-learn_ library. For simplicity, we will keep the same dataset and the declared number of clusters `K = 3`.
###Code
K = 3
y_pred = KMeans(n_clusters=K, random_state=0).fit_predict(X)
X1 = X.copy()
X1['cluster'] = y_pred
h, Z = km_clustering(X, K)
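# (optional sketch) quantify agreement between our labels and sklearn's;
# adjusted_rand_score is invariant to the arbitrary numbering of the clusters
from sklearn.metrics import adjusted_rand_score
last_i = len([col for col in Z.columns if col.startswith('cluster')])
ours = Z['cluster (i={})'.format(last_i)]
print('Adjusted Rand index (custom vs sklearn): {:.3f}'.format(adjusted_rand_score(ours, y_pred)))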
ITERS = len([c for c in Z.columns if c.startswith('cluster')])
fig, axs = plt.subplots(1, 2, figsize=(12, 6))
colors = itertools.cycle(['r', 'g', 'b', 'm', 'c', 'y'])
for k in range(K):
    c = next(colors)  # pick the colour before plotting cluster k
    z = Z[Z['cluster (i={})'.format(ITERS)] == k][FEATURES].to_numpy()
    axs[0].scatter(z[:, 0], z[:, 1], c=c, marker='o', edgecolor='k', alpha=0.2)
    axs[0].scatter(h[ITERS, k, 0], h[ITERS, k, 1], c=c, marker='o', edgecolor='k', s=90)
axs[0].set_xlabel('x0 [a.u.]')
axs[0].set_ylabel('x1 [a.u.]')
axs[0].set_title('Our custom implementation')
colors = itertools.cycle(['r', 'g', 'b', 'm', 'c', 'y'])
for k in range(K):
    c = next(colors)  # pick the colour before plotting cluster k
    x = X1[X1['cluster'] == k][FEATURES].to_numpy()
    axs[1].scatter(x[:, 0], x[:, 1], c=c, marker='o', edgecolor='k', alpha=0.2)
axs[1].set_xlabel('x0 [a.u.]')
axs[1].set_ylabel('x1 [a.u.]')
axs[1].set_title('KMeans by scikit-learn')
plt.show()
###Output
_____no_output_____ |
Traffic_Sign_Classifier_v3.ipynb | ###Markdown
Self-Driving Car Engineer Nanodegree Deep Learning Project: Build a Traffic Sign Recognition Classifier Step 0: Load The Data
###Code
# Load pickled data
import pickle
import numpy as np
# TODO: Fill this in based on where you saved the training and testing data
training_file = './train.p'
validation_file='./valid.p'
testing_file = './test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(validation_file, mode='rb') as f:
valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
###Output
_____no_output_____
###Markdown
--- Step 1: Dataset Summary & ExplorationThe pickled data is a dictionary with 4 key/value pairs:- `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).- `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.- `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.- `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results. Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
###Code
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
n_classes = np.max(y_train) +1
#As suggested by first reviewer:
n_classes= np.unique(y_train).shape[0]
data_augmentation_padding_flag = True
print("data augmentation/padding is turned on: " + str(data_augmentation_padding_flag))
#optionally: append underrepresented classes (randomisation will occur later on)
if data_augmentation_padding_flag:
ny=np.zeros(n_classes)
for cl in range(n_classes):
ny[cl] = np.sum(y_train==cl)
while ny[cl] <2000:
indexset=(y_train==cl)
#print(indexset)
#X_new=X_train[indexset,:,:,:]
#print(X_new.shape)
X_train = np.concatenate([X_train,X_train[indexset,:,:,:]],axis=0)
y_train = np.concatenate([y_train,y_train[indexset]],axis=0)
ny[cl] = np.sum(y_train==cl)
print(ny)
n_train = X_train.shape[0]
n_validation = X_valid.shape[0]
n_test = X_test.shape[0]
image_shape = X_train.shape[1],X_train.shape[2]
print("Number of training examples =", n_train)
print("Number of validation examples =", n_validation)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
print(X_train.shape)
print(y_train.shape)
###Output
data augmentation/padding is turned on: True
[2880. 3960. 2010. 2520. 3540. 3300. 2880. 2580. 2520. 2640. 3600. 2340.
3780. 3840. 2760. 2160. 2880. 3960. 2160. 2880. 2400. 2160. 2640. 3600.
3840. 2700. 2160. 3360. 3840. 3840. 3120. 2760. 3360. 2396. 2880. 2160.
2640. 2880. 3720. 2160. 2400. 3360. 3360.]
Number of training examples = 126926
Number of validation examples = 4410
Number of testing examples = 12630
Image data shape = (32, 32)
Number of classes = 43
(126926, 32, 32, 3)
(126926,)
###Markdown
Include an exploratory visualization of the dataset Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc. The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.**NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
###Code
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
# Visualizations will be shown in the notebook.
%matplotlib inline
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
''
plt.figure(figsize=(1,1))
plt.imshow(image)#, cmap="gray")
print(y_train[index])
#------
num_bins = 43
fig, ax = plt.subplots()
n, bins, patches = ax.hist(y_train, num_bins, rwidth = 0.8,normed=0)
ax.set_xlabel('lables')
ax.set_ylabel('frequency')
ax.set_title(r'frequency of labels in training set')
fig.tight_layout()
plt.show()
num_bins = 43
fig, ax = plt.subplots()
n, bins, patches = ax.hist(y_valid, num_bins,rwidth = 0.8, normed=0)
ax.set_xlabel('lables')
ax.set_ylabel('frequency')
ax.set_title(r'frequency of labels in validation set')
fig.tight_layout()
plt.show()
num_bins = 43
fig, ax = plt.subplots()
n, bins, patches = ax.hist(y_test, num_bins,rwidth = 0.8, normed=0)
ax.set_xlabel('lables')
ax.set_ylabel('frequency')
ax.set_title(r'frequency of labels in test set')
fig.tight_layout()
plt.show()
###Output
31
###Markdown
---- Step 2: Design and Test a Model ArchitectureDesign and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play! With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission. There are various aspects to consider when thinking about this problem:- Neural network architecture (is the network over or underfitting?)- Play around preprocessing techniques (normalization, rgb to grayscale, etc)- Number of examples per label (some have more than others).- Generate fake data.Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these. Pre-process the Data Set (normalization, grayscale, etc.) Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project. Other pre-processing steps are optional. You can try different techniques to see if it improves performance. Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
###Code
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
### Feel free to use as many code cells as needed.
import cv2
index = random.randint(0, len(X_train))
image = X_train[index].squeeze()
''
plt.figure(figsize=(1,1))
plt.imshow(image)#, cmap="gray")
print(y_train[index])
# convert to grey images
X_train_g = np.zeros((X_train.shape[0],X_train.shape[1],X_train.shape[2],1))
X_train_hg = np.zeros((X_train.shape[0],X_train.shape[1],X_train.shape[2],1))
for i in range(X_train.shape[0]):
X_train_g[i,:,:,0]=cv2.cvtColor(X_train[i,:,:,:], cv2.COLOR_BGR2GRAY)
X_train_hg[i,:,:,0]=cv2.equalizeHist(X_train_g[i,:,:,0] .astype(np.uint8))
X_train=X_train_hg.astype(np.float32)
X_valid_g = np.zeros((X_valid.shape[0],X_valid.shape[1],X_valid.shape[2],1))
X_valid_hg = np.zeros((X_valid.shape[0],X_valid.shape[1],X_valid.shape[2],1))
for i in range(X_valid.shape[0]):
X_valid_g[i,:,:,0]=cv2.cvtColor(X_valid[i,:,:,:], cv2.COLOR_BGR2GRAY)
X_valid_hg[i,:,:,0]=cv2.equalizeHist(X_valid_g[i,:,:,0] .astype(np.uint8))
X_valid=X_valid_hg.astype(np.float32)
X_test_g = np.zeros((X_test.shape[0],X_test.shape[1],X_test.shape[2],1))
X_test_hg = np.zeros((X_test.shape[0],X_test.shape[1],X_test.shape[2],1))
for i in range(X_test.shape[0]):
X_test_g[i,:,:,0]=cv2.cvtColor(X_test[i,:,:,:], cv2.COLOR_BGR2GRAY)
X_test_hg[i,:,:,0]=cv2.equalizeHist(X_test_g[i,:,:,0] .astype(np.uint8))
X_test=X_test_hg.astype(np.float32)
#index = random.randint(0, len(X_train))
image_grey = X_train[index].squeeze()
''
plt.figure(figsize=(1,1))
plt.imshow(image_grey, cmap="gray")
# normalize approx
X_train=(X_train-128)/128
X_valid=(X_valid-128)/128
X_test=(X_test-128)/128
print(X_train.shape)
#shuffle
from sklearn.utils import shuffle
X_train, y_train = shuffle(X_train, y_train)
#setup TF
import tensorflow as tf
#EPOCHS = 100 # to be overwritten later!
#BATCH_SIZE = 256 #128 # 128
#mylambda=0.25
print(index)
###Output
35
(126926, 32, 32, 1)
###Markdown
Model Architecture
###Code
### Define your architecture here.
### Feel free to use as many code cells as needed.
from tensorflow.contrib.layers import flatten
mu = 0.00
sigma = 0.1
weights = {
'wc1': tf.Variable(tf.truncated_normal([5, 5, 1, 6], mean = mu, stddev = sigma)),#,name='wc1'),
'wc2': tf.Variable(tf.truncated_normal([5, 5, 6, 16], mean = mu, stddev = sigma)),
'wd1': tf.Variable(tf.truncated_normal([400, 120], mean = mu, stddev = sigma)), #5x5x16. Output = 400
'wd2': tf.Variable(tf.truncated_normal([120, 84], mean = mu, stddev = sigma)), #Input = 120. Output = 84.
'wd3': tf.Variable(tf.truncated_normal([84, 43], mean = mu, stddev = sigma))
} #Input = 84. Output = 10.
biases = {
'bc1': tf.zeros(6),
'bc2': tf.zeros(16),
'bd1': tf.zeros(120),
'bd2': tf.zeros(84),
'bd3': tf.zeros(43)
}
def LeNet(x): # based on my implementation for the quizz in class
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
# TODO: Layer 1: Convolutional. Input = 32x32x1. Output = 28x28x6.
ac1 = tf.nn.conv2d(x, weights['wc1'], strides=[1, 1, 1, 1], padding='VALID')
ac1 = tf.nn.bias_add(ac1, biases['bc1'])
# TODO: Activation.
ac1 = tf.nn.relu(ac1)
# print('ac1 = ' + str(ac1.shape))
#ac1 = tf.nn.dropout(ac1, keep_prob)
# TODO: Pooling. Input = 28x28x6. Output = 14x14x6.
ap1= tf.nn.max_pool(
ac1,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
#print('ap1 = ' + str(ap1.shape))
# TODO: Layer 2: Convolutional. Output = 10x10x16.
ac2 = tf.nn.conv2d(ap1, weights['wc2'], strides=[1, 1, 1, 1], padding='VALID')
ac2 = tf.nn.bias_add(ac2, biases['bc2'])
# TODO: Activation.
ac2 = tf.nn.relu(ac2)
#print('ac2 = ' + str(ac2.shape))
#ac2 = tf.nn.dropout(ac2, keep_prob)
# TODO: Pooling. Input = 10x10x16. Output = 5x5x16.
ap2= tf.nn.max_pool(
ac2,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
#print('ap2 = ' + str(ap2.shape))
# TODO: Flatten. Input = 5x5x16. Output = 400.
fc1 = tf.reshape(ap2, [-1, weights['wd1'].get_shape().as_list()[0]]) # could have used "flatten"
#print('fc1 (after flatten) = ' + str(fc1.shape))
# TODO: Layer 3: Fully Connected. Input = 400. Output = 120.
fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])
# TODO: Activation.
fc1 = tf.nn.relu(fc1)
fc1 = tf.nn.dropout(fc1, keep_prob) #dropout parameter
#print('fc1 (final) = ' + str(fc1.shape))
# TODO: Layer 4: Fully Connected. Input = 120. Output = 84.
fc2 = tf.add(tf.matmul(fc1, weights['wd2']), biases['bd2'])
# TODO: Activation.
fc2 = tf.nn.relu(fc2)
fc2 = tf.nn.dropout(fc2, keep_prob) #dropout parameter
#print('fc2 = ' + str(fc2.shape))
# TODO: Layer 5: Fully Connected. Input = 84. Output = 43.
fc3 = tf.add(tf.matmul(fc2, weights['wd3']), biases['bd3'])
#print('fc3 = ' + str(fc3.shape))
logits=fc3
return logits,ac1,ap1,ac2,ap2
###Output
/home/voll/anaconda3/envs/carnd-term1/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
return f(*args, **kwds)
/home/voll/anaconda3/envs/carnd-term1/lib/python3.5/importlib/_bootstrap.py:222: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
return f(*args, **kwds)
###Markdown
Train, Validate and Test the Model A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation sets implies underfitting. A high accuracy on the training set but a low accuracy on the validation set implies overfitting.
###Code
print(tf.__version__)
#tf.contrib.image.translate not available in this version of TF
# i am not risking an update at the present state, that is a few days before submission
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
x = tf.placeholder(tf.float32, (None, 32, 32, 1)) # xxx
batch_size = tf.shape(x)[0]
im_size=tf.shape(x)[1]
# apply random rotations "on the fly", i.e. on-line
anglevector=tf.random_normal(
[batch_size],
mean=0.0,
stddev=3.0 *3.14/180) # +/- 3° noise
x=tf.contrib.image.rotate(x,anglevector) #rotate x by random vactors
# end of random rotations
# add random noise "on the fly", i.e. on-line
noise=tf.random_normal(
tf.shape(x),
mean=0.0,
stddev=0.1) # range of pixel values is approx [-1,1]
x=x+noise
# end of add random noise
### # the following is omitted due to TF v1.3 on my machine not supporting tf.contrib.image.translate
#### add random translations "on the fly", i.e. on-line
###translations=tf.random_normal(
### [batch_size,im_size,im_size],
### mean=0.0,
### stddev=3) # translate +/- 3 pixel-widths
###x=tf.contrib.image.translate(x,translations)
####translations: A vector representing [dx, dy] or (if images has rank 4) a matrix of length num_images,
####with a [dx, dy] vector for each image in the batch.
#### end of add translations
y = tf.placeholder(tf.int32, (None))
keep_prob = tf.placeholder(tf.float32) # probability to keep units
mylambda = tf.placeholder(tf.float32)
one_hot_y = tf.one_hot(y, 43)
rate = 0.0015 #0.001 #0.003
logits,ac1,ap1,ac2,ap2 = LeNet(x)
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=one_hot_y, logits=logits)
loss_operation = tf.reduce_mean(cross_entropy) \
+ mylambda*tf.reduce_mean(tf.multiply(weights['wc1'],weights['wc1'])) \
+ mylambda*tf.reduce_mean(tf.multiply(weights['wc2'],weights['wc2'])) \
+ mylambda*tf.reduce_mean(tf.multiply(weights['wd1'],weights['wd1'])) \
+ mylambda*tf.reduce_mean(tf.multiply(weights['wd2'],weights['wd2'])) \
+ mylambda*tf.reduce_mean(tf.multiply(weights['wd3'],weights['wd3']))
optimizer = tf.train.AdamOptimizer(learning_rate = rate)
training_operation = optimizer.minimize(loss_operation)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))
accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver()
def evaluate(X_data, y_data):
num_examples = len(X_data)
#print(num_examples)
total_accuracy = 0
sess = tf.get_default_session()
#saver.restore(sess, './lenet')
for offset in range(0, num_examples, BATCH_SIZE):
batch_x, batch_y = X_data[offset:offset+BATCH_SIZE], y_data[offset:offset+BATCH_SIZE]
accuracy = sess.run(accuracy_operation, feed_dict={x: batch_x, y: batch_y,keep_prob: 1.0,mylambda: 0.0})
total_accuracy += (accuracy * len(batch_x))
return total_accuracy / num_examples
import tensorflow as tf
EPOCHS = 100
BATCH_SIZE = 128 #128 # 128
mlambda=1.5 #0.25 #0.25
mkeep_prob=0.5
#saver = tf.train.Saver()
#tf.reset_default_graph()
#saver = tf.train.import_meta_graph('./lenet.meta')
if True: # retrain only on demand
train_err_hist=[]
valid_err_hist=[]
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
#saver = tf.train.import_meta_graph('./lenet.meta')
#saver.restore(sess, './lenet')
#new_saver = tf.train.import_meta_graph('lenet.meta')
#new_saver.restore(sess, tf.train.latest_checkpoint('./'))
num_examples = len(X_train)
num_valid_examples = len(X_valid)
print("Training...")
print()
for i in range(EPOCHS):
X_train, y_train = shuffle(X_train, y_train)
for offset in range(0, num_examples, BATCH_SIZE):
#print(offset)
end = offset + BATCH_SIZE
if 1: #end <= num_examples:
batch_x, batch_y = X_train[offset:end], y_train[offset:end]
sess.run(training_operation, feed_dict={x: batch_x, y: batch_y,keep_prob: mkeep_prob,mylambda: mlambda})
validation_accuracy = evaluate(X_valid, y_valid)
valid_err_hist.append(1-validation_accuracy)
#evaluate(X_valid[:(num_valid_examples//BATCH_SIZE) *BATCH_SIZE- 1,:,:,:], y_valid[:(num_valid_examples//BATCH_SIZE) *BATCH_SIZE-1])
print("EPOCH {} ...".format(i+1))
print("Validation Accuracy = {:.3f}".format(validation_accuracy))
print()
training_accuracy = evaluate(X_train, y_train)
train_err_hist.append(1-training_accuracy)
print("Training Accuracy = {:.3f}".format(training_accuracy))
print()
print()
test_accuracy = evaluate(X_test, y_test)
print("Test Accuracy = {:.3f}".format(test_accuracy))
print()
print()
saver.save(sess, './lenet')
print("Model saved")
###Output
Training...
EPOCH 1 ...
Validation Accuracy = 0.863
Training Accuracy = 0.932
EPOCH 2 ...
Validation Accuracy = 0.905
Training Accuracy = 0.972
EPOCH 3 ...
Validation Accuracy = 0.935
Training Accuracy = 0.983
EPOCH 4 ...
Validation Accuracy = 0.946
Training Accuracy = 0.989
EPOCH 5 ...
Validation Accuracy = 0.951
Training Accuracy = 0.991
EPOCH 6 ...
Validation Accuracy = 0.955
Training Accuracy = 0.993
EPOCH 7 ...
Validation Accuracy = 0.951
Training Accuracy = 0.994
EPOCH 8 ...
Validation Accuracy = 0.948
Training Accuracy = 0.993
EPOCH 9 ...
Validation Accuracy = 0.955
Training Accuracy = 0.995
EPOCH 10 ...
Validation Accuracy = 0.962
Training Accuracy = 0.996
EPOCH 11 ...
Validation Accuracy = 0.967
Training Accuracy = 0.996
EPOCH 12 ...
Validation Accuracy = 0.963
Training Accuracy = 0.995
EPOCH 13 ...
Validation Accuracy = 0.968
Training Accuracy = 0.996
EPOCH 14 ...
Validation Accuracy = 0.966
Training Accuracy = 0.997
EPOCH 15 ...
Validation Accuracy = 0.962
Training Accuracy = 0.997
EPOCH 16 ...
Validation Accuracy = 0.968
Training Accuracy = 0.995
EPOCH 17 ...
Validation Accuracy = 0.975
Training Accuracy = 0.998
EPOCH 18 ...
Validation Accuracy = 0.972
Training Accuracy = 0.997
EPOCH 19 ...
Validation Accuracy = 0.966
Training Accuracy = 0.995
EPOCH 20 ...
Validation Accuracy = 0.967
Training Accuracy = 0.997
EPOCH 21 ...
Validation Accuracy = 0.968
Training Accuracy = 0.997
EPOCH 22 ...
Validation Accuracy = 0.972
Training Accuracy = 0.998
EPOCH 23 ...
Validation Accuracy = 0.970
Training Accuracy = 0.998
EPOCH 24 ...
Validation Accuracy = 0.969
Training Accuracy = 0.998
EPOCH 25 ...
Validation Accuracy = 0.967
Training Accuracy = 0.997
EPOCH 26 ...
Validation Accuracy = 0.974
Training Accuracy = 0.998
EPOCH 27 ...
Validation Accuracy = 0.964
Training Accuracy = 0.997
EPOCH 28 ...
Validation Accuracy = 0.975
Training Accuracy = 0.998
EPOCH 29 ...
Validation Accuracy = 0.970
Training Accuracy = 0.997
EPOCH 30 ...
Validation Accuracy = 0.971
Training Accuracy = 0.998
EPOCH 31 ...
Validation Accuracy = 0.970
Training Accuracy = 0.997
EPOCH 32 ...
Validation Accuracy = 0.971
Training Accuracy = 0.998
EPOCH 33 ...
Validation Accuracy = 0.971
Training Accuracy = 0.998
EPOCH 34 ...
Validation Accuracy = 0.973
Training Accuracy = 0.998
EPOCH 35 ...
Validation Accuracy = 0.968
Training Accuracy = 0.998
EPOCH 36 ...
Validation Accuracy = 0.967
Training Accuracy = 0.997
EPOCH 37 ...
Validation Accuracy = 0.968
Training Accuracy = 0.998
EPOCH 38 ...
Validation Accuracy = 0.970
Training Accuracy = 0.998
EPOCH 39 ...
Validation Accuracy = 0.968
Training Accuracy = 0.998
EPOCH 40 ...
Validation Accuracy = 0.972
Training Accuracy = 0.998
EPOCH 41 ...
Validation Accuracy = 0.972
Training Accuracy = 0.998
EPOCH 42 ...
Validation Accuracy = 0.973
Training Accuracy = 0.998
EPOCH 43 ...
Validation Accuracy = 0.971
Training Accuracy = 0.999
EPOCH 44 ...
Validation Accuracy = 0.971
Training Accuracy = 0.999
EPOCH 45 ...
Validation Accuracy = 0.973
Training Accuracy = 0.997
EPOCH 46 ...
Validation Accuracy = 0.976
Training Accuracy = 0.998
EPOCH 47 ...
Validation Accuracy = 0.971
Training Accuracy = 0.999
EPOCH 48 ...
Validation Accuracy = 0.972
Training Accuracy = 0.998
EPOCH 49 ...
Validation Accuracy = 0.965
Training Accuracy = 0.997
EPOCH 50 ...
Validation Accuracy = 0.977
Training Accuracy = 0.999
EPOCH 51 ...
Validation Accuracy = 0.974
Training Accuracy = 0.998
EPOCH 52 ...
Validation Accuracy = 0.974
Training Accuracy = 0.998
EPOCH 53 ...
Validation Accuracy = 0.975
Training Accuracy = 0.999
EPOCH 54 ...
Validation Accuracy = 0.976
Training Accuracy = 0.999
EPOCH 55 ...
Validation Accuracy = 0.973
Training Accuracy = 0.999
EPOCH 56 ...
Validation Accuracy = 0.970
Training Accuracy = 0.998
EPOCH 57 ...
Validation Accuracy = 0.974
Training Accuracy = 0.999
EPOCH 58 ...
Validation Accuracy = 0.967
Training Accuracy = 0.998
EPOCH 59 ...
Validation Accuracy = 0.977
Training Accuracy = 0.998
EPOCH 60 ...
Validation Accuracy = 0.976
Training Accuracy = 0.998
EPOCH 61 ...
Validation Accuracy = 0.978
Training Accuracy = 0.999
EPOCH 62 ...
Validation Accuracy = 0.975
Training Accuracy = 0.998
EPOCH 63 ...
Validation Accuracy = 0.969
Training Accuracy = 0.998
EPOCH 64 ...
Validation Accuracy = 0.970
Training Accuracy = 0.998
EPOCH 65 ...
Validation Accuracy = 0.973
Training Accuracy = 0.999
EPOCH 66 ...
Validation Accuracy = 0.976
Training Accuracy = 0.999
EPOCH 67 ...
Validation Accuracy = 0.976
Training Accuracy = 0.999
EPOCH 68 ...
Validation Accuracy = 0.976
Training Accuracy = 0.999
EPOCH 69 ...
Validation Accuracy = 0.970
Training Accuracy = 0.998
EPOCH 70 ...
Validation Accuracy = 0.975
Training Accuracy = 0.999
EPOCH 71 ...
Validation Accuracy = 0.973
Training Accuracy = 0.998
EPOCH 72 ...
Validation Accuracy = 0.976
Training Accuracy = 0.999
EPOCH 73 ...
Validation Accuracy = 0.976
Training Accuracy = 0.999
EPOCH 74 ...
Validation Accuracy = 0.977
Training Accuracy = 0.999
EPOCH 75 ...
Validation Accuracy = 0.971
Training Accuracy = 0.999
EPOCH 76 ...
Validation Accuracy = 0.973
Training Accuracy = 0.999
EPOCH 77 ...
Validation Accuracy = 0.971
Training Accuracy = 0.998
EPOCH 78 ...
Validation Accuracy = 0.972
Training Accuracy = 0.998
EPOCH 79 ...
Validation Accuracy = 0.975
Training Accuracy = 0.998
EPOCH 80 ...
Validation Accuracy = 0.974
Training Accuracy = 0.999
EPOCH 81 ...
Validation Accuracy = 0.972
Training Accuracy = 0.998
EPOCH 82 ...
Validation Accuracy = 0.977
Training Accuracy = 0.999
EPOCH 83 ...
Validation Accuracy = 0.975
Training Accuracy = 0.999
EPOCH 84 ...
Validation Accuracy = 0.974
Training Accuracy = 0.999
EPOCH 85 ...
Validation Accuracy = 0.967
Training Accuracy = 0.998
EPOCH 86 ...
Validation Accuracy = 0.978
Training Accuracy = 0.999
EPOCH 87 ...
Validation Accuracy = 0.975
Training Accuracy = 0.998
EPOCH 88 ...
Validation Accuracy = 0.974
Training Accuracy = 0.999
EPOCH 89 ...
Validation Accuracy = 0.974
Training Accuracy = 0.998
EPOCH 90 ...
Validation Accuracy = 0.969
Training Accuracy = 0.998
EPOCH 91 ...
Validation Accuracy = 0.975
Training Accuracy = 0.999
EPOCH 92 ...
Validation Accuracy = 0.969
Training Accuracy = 0.998
EPOCH 93 ...
Validation Accuracy = 0.977
Training Accuracy = 0.999
EPOCH 94 ...
Validation Accuracy = 0.977
Training Accuracy = 0.999
EPOCH 95 ...
Validation Accuracy = 0.972
Training Accuracy = 0.998
EPOCH 96 ...
Validation Accuracy = 0.967
Training Accuracy = 0.998
EPOCH 97 ...
Validation Accuracy = 0.976
Training Accuracy = 0.999
EPOCH 98 ...
Validation Accuracy = 0.973
Training Accuracy = 0.999
EPOCH 99 ...
Validation Accuracy = 0.973
Training Accuracy = 0.998
EPOCH 100 ...
Validation Accuracy = 0.978
Training Accuracy = 0.999
Test Accuracy = 0.949
Model saved
###Markdown
--- Step 3: Test a Model on New Images To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type. You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
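A minimal sketch of one way to use it (assuming `signnames.csv` has the usual `ClassId,SignName` header and sits next to this notebook) is to load it into a dictionary keyed by class id:
```
import csv

# Build a {class_id: sign_name} lookup table (assumes a ClassId,SignName CSV header)
with open('signnames.csv') as f:
    sign_names = {int(row['ClassId']): row['SignName'] for row in csv.DictReader(f)}

print(sign_names[14])  # e.g. look up the name of class 14
```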
###Code
#print(train_acc_hist)
#plt.plot([train_acc_hist, valid_acc_hist])
t = np.linspace(1, EPOCHS,EPOCHS)
zz= np.zeros(EPOCHS)
ff= zz+0.04
fig, ax = plt.subplots(figsize=(10, 6))
line1, = plt.plot(t, train_err_hist, 'b-', label='train_err')
line2, = plt.plot(t, valid_err_hist, 'r--', label='valid_err')
line3, = plt.plot(t, zz, 'g' )
line4, = plt.plot(t, ff, 'g' )
ax.set_xlabel('epochs')
ax.set_ylabel('error (1-accuracy) in %')
ax.set_title('Learning Curves')
ax.legend(loc='upper right')
fig.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Load and Output the Images
###Code
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import cv2
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import numpy as np
def preprocess(filepath):
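# Resize to 32x32, convert to grayscale, histogram-equalize, and scale pixels to roughly [-1, 1),
# mirroring the pre-processing applied to the training data earlier in the notebook.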
img = cv2.imread(filepath) #(('./verkehrszeichen/tempo30.jpg')
#print(img.shape)
#plt.figure(figsize=(1,1))
#plt.imshow(img)
img_small= cv2.resize(img, (32,32))
#X_test_g = np.zeros((X_test.shape[0],X_test.shape[1],X_test.shape[2],1))
#X_test_hg = np.zeros((X_test.shape[0],X_test.shape[1],X_test.shape[2]))
#for i in range(X_test.shape[0]):
# X_test_g[i,:,:,0]=cv2.cvtColor(X_test[i,:,:,:], cv2.COLOR_BGR2GRAY)
# X_test_hg[i,:,:]=cv2.equalizeHist(X_test_g[i,:,:] .astype(np.uint8))
#X_test=X_test_hg
img_small_g = np.zeros((32,32,1))
img_small_g[:,:,0]=cv2.cvtColor(img_small, cv2.COLOR_BGR2GRAY)
img_small_hg = np.zeros((32,32,1))
img_small_hg[:,:,0]=cv2.equalizeHist(img_small_g[:,:,0] .astype(np.uint8))
img_small_hg= (img_small_hg.astype(np.float32) -128)/128
plt.figure(figsize=(1,1))
plt.imshow(img_small_hg[:,:,0], cmap="gray")
return img_small_hg
def eval_leNet(img):
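# Restore the saved LeNet checkpoint and return the top-3 softmax probabilities and their class ids for one pre-processed image.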
import tensorflow as tf
with tf.Session() as sess:
#saver = tf.train.import_meta_graph('./lenet.meta')
saver.restore(sess, './lenet')
values,indices = sess.run(tf.nn.top_k(tf.nn.softmax(logits),3), feed_dict={x: [img],keep_prob: 1.0})
# training_accuracy = evaluate(X_train, y_train)  # unused here; re-evaluating the whole training set for every image only slows prediction down
return values,indices
#--- define realworld images
y_realworld=np.array([1,38,14,4,9,17,13,12]) # the true indices of the following images
mytable=np.zeros((y_realworld.shape[0], 7)) # table to store the softmax probs, the indices and true index
myindex=0
tempo30_scaled = preprocess('./verkehrszeichen/tempo30.jpg')
values,indices = eval_leNet(tempo30_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
rechts_vorbei_scaled = preprocess('./verkehrszeichen/rechts_vorbei.jpg')
values,indices =eval_leNet(rechts_vorbei_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
stop_scaled = preprocess('./verkehrszeichen/stop.jpg')
values,indices =eval_leNet(stop_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
tempo70_scaled = preprocess('./verkehrszeichen/tempo70.jpg')
values,indices =eval_leNet(tempo70_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
ueberholverbot_scaled = preprocess('./verkehrszeichen/ueberholverbot.jpg')
values,indices =eval_leNet(ueberholverbot_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
einfahrt_verboten_scaled = preprocess('./verkehrszeichen/einfahrt_verboten.jpg')
values,indices =eval_leNet(einfahrt_verboten_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
vorfahrt_achten_scaled = preprocess('./verkehrszeichen/vorfahrt_achten.jpg')
values,indices =eval_leNet(vorfahrt_achten_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
vorfahrtstrasse_scaled = preprocess('./verkehrszeichen/vorfahrtstrasse.jpg')
values,indices =eval_leNet(vorfahrtstrasse_scaled)
mytable[myindex,0:3]=values
mytable[myindex,3:6]=indices
mytable[myindex,6]=y_realworld[myindex]
myindex=myindex+1
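# (Sketch) The eight per-image blocks above could also be written as a single loop over
# (filename, true class id) pairs; the file list below is illustrative, not defined elsewhere:
# for myindex, (fname, true_idx) in enumerate([('tempo30.jpg', 1), ('rechts_vorbei.jpg', 38), ('stop.jpg', 14)]):
#     img = preprocess('./verkehrszeichen/' + fname)
#     values, indices = eval_leNet(img)
#     mytable[myindex, 0:3] = values
#     mytable[myindex, 3:6] = indices
#     mytable[myindex, 6] = true_idx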
#----------
print()
print('example of German traffic sign not in the list of 43 signs:')
h3p5m_scaled = preprocess('./verkehrszeichen/3p5m.jpg')
print()
print(eval_leNet(h3p5m_scaled))
#---------
X_realworld=np.array([tempo30_scaled,
rechts_vorbei_scaled,
stop_scaled,
tempo70_scaled,
ueberholverbot_scaled,
einfahrt_verboten_scaled,
vorfahrt_achten_scaled,
vorfahrtstrasse_scaled
])
#h3p5m_scaled = preprocess('./verkehrszeichen/3p5m.jpg')
#print(eval_leNet(h3p5m_scaled))
np.set_printoptions(precision=2,suppress = True)
print(mytable)
#print(X_realworld.shape)
#print(y_realworld.shape)
with tf.Session() as sess:
#saver = tf.train.import_meta_graph('./lenet.meta')
saver.restore(sess, './lenet')
print()
print('real world acc:'+str(evaluate(X_realworld,y_realworld)))
###Output
INFO:tensorflow:Restoring parameters from ./lenet
INFO:tensorflow:Restoring parameters from ./lenet
INFO:tensorflow:Restoring parameters from ./lenet
INFO:tensorflow:Restoring parameters from ./lenet
INFO:tensorflow:Restoring parameters from ./lenet
INFO:tensorflow:Restoring parameters from ./lenet
INFO:tensorflow:Restoring parameters from ./lenet
INFO:tensorflow:Restoring parameters from ./lenet
example of German traffic sign not in the list of 43 signs:
INFO:tensorflow:Restoring parameters from ./lenet
(array([[0.8203516 , 0.0333126 , 0.02103482]], dtype=float32), array([[40, 21, 31]], dtype=int32))
[[ 0.82 0.17 0. 38. 14. 36. 1. ]
[ 0.52 0.19 0.08 39. 13. 14. 38. ]
[ 0.99 0. 0. 14. 33. 36. 14. ]
[ 0.96 0.03 0.01 1. 18. 31. 4. ]
[ 0.55 0.27 0.08 8. 13. 15. 9. ]
[ 1. 0. 0. 17. 14. 32. 17. ]
[ 1. 0. 0. 13. 12. 35. 13. ]
[ 1. 0. 0. 12. 40. 17. 12. ]]
INFO:tensorflow:Restoring parameters from ./lenet
real world acc:0.5
###Markdown
Predict the Sign Type for Each Image
###Code
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
values.shape
mytable
###Output
_____no_output_____
###Markdown
Analyze Performance Output Top 5 Softmax Probabilities For Each Image Found on the Web For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here. The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image. `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids. Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:``` (5, 6) array a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497, 0.12789202], [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401, 0.15899337], [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 , 0.23892179], [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 , 0.16505091], [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137, 0.09155967]])```Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:```TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202], [ 0.28086119, 0.27569815, 0.18063401], [ 0.26076848, 0.23892179, 0.23664738], [ 0.29198961, 0.26234032, 0.16505091], [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5], [0, 1, 4], [0, 5, 1], [1, 3, 5], [1, 4, 3]], dtype=int32))```Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices. Project Writeup Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file. > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission. --- Step 4 (Optional): Visualize the Neural Network's State with Test Images This Section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimulus image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol. 
Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimulus image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for its second convolutional layer you could enter conv2 as the tf_activation variable. For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image. Your output should look something like this (above)
###Code
myimage= stop_scaled #rechts_vorbei_scaled
plt.figure(figsize=(1,1))
plt.imshow(myimage[:,:,0], cmap="gray")
### Visualize your network's feature maps here.
### Feel free to use as many code cells as needed.
# image_input: the test image being fed into the network to produce the feature maps
# tf_activation: should be a tf variable name used during your training procedure that represents the calculated state of a specific weight layer
# activation_min/max: can be used to view the activation contrast in more detail, by default matplot sets min and max to the actual min and max values of the output
# plt_num: used to plot out multiple different weight feature map sets on the same block, just extend the plt number for each new feature map entry
def outputFeatureMap(image_input, tf_activation, activation_min=-1, activation_max=-1 ,plt_num=1):
# Here make sure to preprocess your image_input in a way your network expects
# with size, normalization, ect if needed
# image_input =
# Note: x should be the same name as your network's tensorflow data placeholder variable
# If you get an error tf_activation is not defined it may be having trouble accessing the variable from inside a function
activation = tf_activation.eval(session=sess,feed_dict={x : [image_input]})
featuremaps = activation.shape[3]
plt.figure(plt_num, figsize=(15,15))
for featuremap in range(featuremaps):
plt.subplot(6,8, featuremap+1) # sets the number of feature maps to show on each row and column
plt.title('FeatureMap ' + str(featuremap)) # displays the feature map number
if activation_min != -1 and activation_max != -1:  # 'and', not bitwise '&', so both bounds must be set
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin =activation_min, vmax=activation_max, cmap="gray")
elif activation_max != -1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmax=activation_max, cmap="gray")
elif activation_min !=-1:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", vmin=activation_min, cmap="gray")
else:
plt.imshow(activation[0,:,:, featuremap], interpolation="nearest", cmap="gray")
with tf.Session() as sess:
saver.restore(sess, './lenet')
outputFeatureMap(myimage,ap1)
###Output
INFO:tensorflow:Restoring parameters from ./lenet
|
improved_contrastive_divergence_v6_cycsgld_celeba.ipynb | ###Markdown
Mounting to Google Drive
###Code
!pip install geomloss
!pip install torchmetrics[image]
# from google.colab import drive
# import os
# drive.mount('/content/drive')
# ROOT = "/content/drive/MyDrive/Colab Notebooks"
# sample_dir = os.path.join(ROOT, 'improved_contrastive_divergence.v6')
# if not os.path.exists(sample_dir):
# os.makedirs(sample_dir)
# os.chdir(sample_dir)
import os
ROOT = "/workspace/EBM/"
sample_dir = os.path.join(ROOT, 'iccd.v6')
if not os.path.exists(sample_dir):
os.makedirs(sample_dir)
os.chdir(sample_dir)
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
###Output
_____no_output_____
###Markdown
Dependencies
###Code
from easydict import EasyDict
from tqdm import tqdm
import time
import timeit
import os.path as osp
import pandas as pd
from PIL import Image
import pickle
from imageio import imread
import cv2
import scipy.spatial as ss
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import Dataset
import torchvision
import torch
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.datasets import MNIST
from torch.nn import Dropout
from torch.optim import Adam, SGD
import torch.nn.functional as F
from torch.nn.utils import clip_grad_norm_
from torchvision import models
from torchmetrics import IS, FID
import numpy as np
import random
import matplotlib.pyplot as plt
from scipy import linalg
from math import exp, log
from geomloss import SamplesLoss
from autograd.numpy import sqrt, sin, cos, exp, pi, prod
from autograd.numpy.random import normal
from collections import OrderedDict
%load_ext tensorboard
###Output
_____no_output_____
###Markdown
Configuration
###Code
flags = EasyDict()
# Configurations for distributed training
flags['slurm'] = False # whether we are on slurm
flags['repel_im'] = True # maximize entropy by repeling images from each other
flags['hmc'] = False # use the hamiltonian monte carlo sampler
flags['sampler'] = 'cycsgld' # which MCMC sampler to use ('cycsgld' = cyclical SGLD)
flags['square_energy'] = False # make the energy square
flags['alias'] = False # use antialiased (blur-pool) downsampling in the model
flags['cpu'] = torch.device("cpu")
flags['gpu'] = torch.device("cuda:0")
flags['dataset'] = 'celeba' # cifar10 or celeba
flags['batch_size'] = 128 #128 # batch size during training
flags['multiscale'] = False # A multiscale EBM
flags['self_attn'] = True #Use self attention in models
flags['sigmoid'] = False # Apply sigmoid on energy (can improve the stability)
flags['anneal'] = False # Decrease noise over Langevin steps
flags['data_workers'] = 4 # Number of different data workers to load data in parallel
flags['buffer_size'] = 10000 # Size of inputs
# General Experiment Settings
flags['exp'] = 'cycsgld_celeba' #name of experiments
flags['log_interval'] = 100 #log outputs every so many batches
flags['save_interval'] = 500 # save outputs every so many batches
flags['test_interval'] = 500 # evaluate outputs every so many batches
flags['resume_iter'] = 0 #iteration to resume training from
flags['train'] = True # whether to train or test
flags['transform'] = True # apply data augmentation when sampling from the replay buffer
flags['kl'] = True # apply a KL term to loss
flags['entropy'] = 'kl'
flags['cuda'] = True # run on the CUDA device
flags['epoch_num'] = 10 # Number of Epochs to train on
flags['ensembles'] = 1 #Number of ensembles to train models with
flags['lr'] = 2e-4 #Learning for training
flags['kl_coeff'] = 1.0 #coefficient for kl
# EBM Specific Experiments Settings
flags['objective'] = 'cd' #use the cd objective
# Setting for MCMC sampling
flags['num_steps'] = 40 # Steps of gradient descent for training
flags['step_lr'] = 20.5 # Size of steps for gradient descent
flags['replay_batch'] = True # Use MCMC chains initialized from a replay buffer.
flags['reservoir'] = True # Use a reservoir buffer of past entries
flags['noise_scale'] = 0.23 # Relative amount of noise for MCMC
flags['init_noise'] = 0.1
flags['momentum'] = 0.9
flags['eps'] = 1e-6
flags['step_size'] = 10
# Architecture Settings
flags['filter_dim'] = 64 #64 #number of filters for conv nets
flags['im_size'] = 32 #32 #size of images
flags['spec_norm'] = False #Whether to use spectral normalization on weights
flags['norm'] = True #Use group/instance norm in the models
# Conditional settings
flags['cond'] = False #conditional generation with the model
flags['all_step'] = False #backprop through all langevin steps
flags['log_grad'] = False #log the gradient norm of the kl term
flags['cond_idx'] = 0 #conditioned index
writer = SummaryWriter(comment="_{sampler}_{entropy}_{dataset}_{step_lr}_{noise_scale}".format(dataset=flags.dataset, entropy=flags.entropy, sampler=flags.sampler, step_lr=flags.step_lr, noise_scale=flags.noise_scale))
inception = IS().to(flags.gpu, non_blocking=True)
fid = FID(feature=2048).to(flags.gpu, non_blocking=True)
# kid = KID(subset_size=50)
###Output
_____no_output_____
###Markdown
Utils
###Code
# Functions for adaptations with PyTorch:
def to_np_array(*arrays):
"""Transform torch tensors/Variables into numpy arrays"""
array_list = []
for array in arrays:
if isinstance(array, Variable):
if array.is_cuda:
array = array.cpu()
array = array.data
if isinstance(array, torch.FloatTensor) or isinstance(array, torch.LongTensor) or isinstance(array, torch.ByteTensor) or isinstance(array, torch.cuda.FloatTensor) or isinstance(array, torch.cuda.LongTensor) or isinstance(array, torch.cuda.ByteTensor):
if array.is_cuda:
array = array.cpu()
array = array.numpy()
array_list.append(array)
if len(array_list) == 1:
array_list = array_list[0]
return array_list
def kldiv(x, xp, k=3, base=2):
""" KL Divergence between p and q for x~p(x), xp~q(x)
x, xp should be a list of vectors, e.g. x = [[1.3], [3.7], [5.1], [2.4]]
if x is a one-dimensional scalar and we have four samples
"""
assert k <= len(x) - 1, "Set k smaller than num. samples - 1"
assert k <= len(xp) - 1, "Set k smaller than num. samples - 1"
assert len(x[0]) == len(xp[0]), "Two distributions must have same dim."
x, xp = to_np_array(x, xp)
d = len(x[0])
n = len(x)
m = len(xp)
const = log(m) - log(n - 1)
tree = ss.cKDTree(x)
treep = ss.cKDTree(xp)
nn = [tree.query(point, k + 1, p=float('inf'))[0][k] for point in x]
nnp = [treep.query(point, k, p=float('inf'))[0][k - 1] for point in x]
return (const + d * np.mean(np.log(nnp)) - d * np.mean(np.log(nn))) / log(base)
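# Example usage (sketch): estimate KL between samples from N(0,1) and N(1,1).
# The true value is 0.5 nats (about 0.72 bits with the default base=2), so the
# estimate should land near that for a few thousand samples:
# x_p = np.random.randn(2000, 1)
# x_q = np.random.randn(2000, 1) + 1.0
# print(kldiv(x_p, x_q, k=3))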
def swish(x):
return x * torch.sigmoid(x)
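# Weight-standardized convolution: each filter is normalized to zero mean and unit standard
# deviation before the convolution (cf. Qiao et al., "Weight Standardization"), which pairs well
# with the GroupNorm/InstanceNorm layers used below.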
class WSConv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(WSConv2d, self).__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias)
def forward(self, x):
weight = self.weight
weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,
keepdim=True).mean(dim=3, keepdim=True)
weight = weight - weight_mean
std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
weight = weight / std.expand_as(weight)
return F.conv2d(x, weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
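# Helpers to quantize images to uint8 (e.g. for compact replay-buffer storage) and to convert
# them back to floats with uniform dequantization noise.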
def compress_x_mod(x_mod):
x_mod = (255 * np.clip(x_mod, 0, 1)).astype(np.uint8)
return x_mod
def decompress_x_mod(x_mod):
x_mod = x_mod / 256 + \
np.random.uniform(0, 1 / 256, x_mod.shape)
return x_mod
def ema_model(models, models_ema, mu=0.99):
for model, model_ema in zip(models, models_ema):
for param, param_ema in zip(model.parameters(), model_ema.parameters()):
param_ema.data[:] = mu * param_ema.data + (1 - mu) * param.data
###Output
_____no_output_____
###Markdown
Downsample
###Code
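# Anti-aliased downsampling ("blur pool"): low-pass filter with a small binomial kernel before
# strided subsampling, reducing aliasing when feature maps are shrunk (Zhang, 2019).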
class Downsample(nn.Module):
def __init__(self, pad_type='reflect', filt_size=3, stride=2, channels=None, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1.*(filt_size-1)/2), int(np.ceil(1.*(filt_size-1)/2)), int(1.*(filt_size-1)/2), int(np.ceil(1.*(filt_size-1)/2))]
self.pad_sizes = [pad_size+pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride-1)/2.)
self.channels = channels
if(self.filt_size==1):
a = np.array([1.,])
elif(self.filt_size==2):
a = np.array([1., 1.])
elif(self.filt_size==3):
a = np.array([1., 2., 1.])
elif(self.filt_size==4):
a = np.array([1., 3., 3., 1.])
elif(self.filt_size==5):
a = np.array([1., 4., 6., 4., 1.])
elif(self.filt_size==6):
a = np.array([1., 5., 10., 10., 5., 1.])
elif(self.filt_size==7):
a = np.array([1., 6., 15., 20., 15., 6., 1.])
filt = torch.Tensor(a[:,None]*a[None,:])
filt = filt/torch.sum(filt)
self.register_buffer('filt', filt[None,None,:,:].repeat((self.channels,1,1,1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if(self.filt_size==1):
if(self.pad_off==0):
return inp[:,:,::self.stride,::self.stride]
else:
return self.pad(inp)[:,:,::self.stride,::self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
def get_pad_layer(pad_type):
if(pad_type in ['refl','reflect']):
PadLayer = nn.ReflectionPad2d
elif(pad_type in ['repl','replicate']):
PadLayer = nn.ReplicationPad2d
elif(pad_type=='zero'):
PadLayer = nn.ZeroPad2d
else:
print('Pad type [%s] not recognized'%pad_type)
return PadLayer
###Output
_____no_output_____
###Markdown
Models
###Code
class Self_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self,in_dim,activation):
super(Self_Attn,self).__init__()
self.chanel_in = in_dim
self.activation = activation
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize,C,width ,height = x.size()
proj_query = self.query_conv(x).view(m_batchsize,-1,width*height).permute(0,2,1) # B X CX(N)
proj_key = self.key_conv(x).view(m_batchsize,-1,width*height) # B X C x (*W*H)
energy = torch.bmm(proj_query,proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N)
proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B X C X N
out = torch.bmm(proj_value,attention.permute(0,2,1) )
out = out.view(m_batchsize,C,width,height)
out = self.gamma*out + x
return out,attention
class CondResBlock(nn.Module):
def __init__(self, args, downsample=True, rescale=True, filters=64, latent_dim=64, im_size=64, classes=512, norm=True, spec_norm=False):
super(CondResBlock, self).__init__()
self.filters = filters
self.latent_dim = latent_dim
self.im_size = im_size
self.downsample = downsample
if filters <= 128:
self.bn1 = nn.InstanceNorm2d(filters, affine=True)
else:
self.bn1 = nn.GroupNorm(32, filters)
if not norm:
self.bn1 = None
self.args = args
if spec_norm:
self.conv1 = spectral_norm(nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1))
else:
self.conv1 = WSConv2d(filters, filters, kernel_size=3, stride=1, padding=1)
if filters <= 128:
self.bn2 = nn.InstanceNorm2d(filters, affine=True)
else:
self.bn2 = nn.GroupNorm(32, filters, affine=True)
if not norm:
self.bn2 = None
if spec_norm:
self.conv2 = spectral_norm(nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1))
else:
self.conv2 = WSConv2d(filters, filters, kernel_size=3, stride=1, padding=1)
self.dropout = Dropout(0.2)
# Upscale to a mask of the image
self.latent_map = nn.Linear(classes, 2*filters)
self.latent_map_2 = nn.Linear(classes, 2*filters)
self.relu = torch.nn.ReLU(inplace=True)
self.act = swish
# Upscale to a mask of the image
if downsample:
if rescale:
self.conv_downsample = nn.Conv2d(filters, 2 * filters, kernel_size=3, stride=1, padding=1)
if args.alias:
self.avg_pool = Downsample(channels=2*filters)
else:
self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
else:
self.conv_downsample = nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)
if args.alias:
self.avg_pool = Downsample(channels=filters)
else:
self.avg_pool = nn.AvgPool2d(3, stride=2, padding=1)
def forward(self, x, y):
x_orig = x
if y is not None:
latent_map = self.latent_map(y).view(-1, 2*self.filters, 1, 1)
gain = latent_map[:, :self.filters]
bias = latent_map[:, self.filters:]
x = self.conv1(x)
if self.bn1 is not None:
x = self.bn1(x)
if y is not None:
x = gain * x + bias
x = self.act(x)
if y is not None:
latent_map = self.latent_map_2(y).view(-1, 2*self.filters, 1, 1)
gain = latent_map[:, :self.filters]
bias = latent_map[:, self.filters:]
x = self.conv2(x)
if self.bn2 is not None:
x = self.bn2(x)
if y is not None:
x = gain * x + bias
x = self.act(x)
x_out = x
if self.downsample:
x_out = self.conv_downsample(x_out)
x_out = self.act(self.avg_pool(x_out))
return x_out
###Output
_____no_output_____
###Markdown
MNIST Model
###Code
class MNISTModel(nn.Module):
def __init__(self, args):
super(MNISTModel, self).__init__()
self.act = swish
# self.relu = torch.nn.ReLU(inplace=True)
self.args = args
self.filter_dim = args.filter_dim
self.init_main_model()
self.init_label_map()
self.filter_dim = args.filter_dim
# self.act = self.relu
self.cond = args.cond
self.sigmoid = args.sigmoid
def init_main_model(self):
args = self.args
filter_dim = self.filter_dim
im_size = 28
self.conv1 = nn.Conv2d(1, filter_dim, kernel_size=3, stride=1, padding=1)
self.res1 = CondResBlock(args, filters=filter_dim, latent_dim=1, im_size=im_size)
self.res2 = CondResBlock(args, filters=2*filter_dim, latent_dim=1, im_size=im_size)
self.res3 = CondResBlock(args, filters=4*filter_dim, latent_dim=1, im_size=im_size)
self.energy_map = nn.Linear(filter_dim*8, 1)
def init_label_map(self):
args = self.args
self.map_fc1 = nn.Linear(10, 256)
self.map_fc2 = nn.Linear(256, 256)
def main_model(self, x, latent):
x = x.view(-1, 1, 28, 28)
x = self.act(self.conv1(x))
x = self.res1(x, latent)
x = self.res2(x, latent)
x = self.res3(x, latent)
x = self.act(x)
x = x.mean(dim=2).mean(dim=2)
energy = self.energy_map(x)
return energy
def label_map(self, latent):
x = self.act(self.map_fc1(latent))
x = self.map_fc2(x)
return x
def forward(self, x, latent):
args = self.args
x = x.view(x.size(0), -1)
if self.cond:
latent = self.label_map(latent)
else:
latent = None
energy = self.main_model(x, latent)
return energy
###Output
_____no_output_____
###Markdown
Standard CNN Model
###Code
class StandardCNN(nn.Module):
def __init__(self):
super(StandardCNN, self).__init__()
self.conv1 = nn.utils.spectral_norm(nn.Conv2d(3, 64, 3, 1, 1))
self.conv2 = nn.utils.spectral_norm(nn.Conv2d(64, 64, 4, 2, 1))
self.conv3 = nn.utils.spectral_norm(nn.Conv2d(64, 128, 3, 1, 1))
self.conv4 = nn.utils.spectral_norm(nn.Conv2d(128, 128, 4, 2, 1))
self.conv5 = nn.utils.spectral_norm(nn.Conv2d(128, 256, 3, 1, 1))
self.conv6 = nn.utils.spectral_norm(nn.Conv2d(256, 256, 4, 2, 1))
self.conv7 = nn.utils.spectral_norm(nn.Conv2d(256, 512, 3, 1, 1))
self.pool = nn.MaxPool2d(2, 2)
self.act = nn.LeakyReLU(negative_slope=0.1, inplace=True)
self.dense = nn.utils.spectral_norm(nn.Linear(512 * 4 * 4, 1))
def forward(self, x):
x = self.act(self.conv1(x))
x = self.act(self.conv2(x))
# x = self.pool(x)
x = self.act(self.conv3(x))
x = self.act(self.conv4(x))
# x = self.pool(x)
x = self.act(self.conv5(x))
x = self.act(self.conv6(x))
# x = self.pool(x)
x = self.act(self.conv7(x))
x = self.dense(x.view(x.shape[0], -1))
return x
###Output
_____no_output_____
###Markdown
CelebA Model
###Code
class CelebAModel(nn.Module):
def __init__(self, args, debug=False):
super(CelebAModel, self).__init__()
self.act = swish
self.max_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.cond = args.cond
self.args = args
self.init_main_model()
if args.multiscale:
self.init_mid_model()
self.init_small_model()
self.relu = torch.nn.ReLU(inplace=True)
self.downsample = Downsample(channels=3)
self.heir_weight = nn.Parameter(torch.Tensor([1.0, 1.0, 1.0]))
self.debug = debug
def init_main_model(self):
args = self.args
filter_dim = args.filter_dim
latent_dim = args.filter_dim
im_size = args.im_size
self.conv1 = nn.Conv2d(3, filter_dim // 2, kernel_size=3, stride=1, padding=1)
self.res_1a = CondResBlock(args, filters=filter_dim // 2, latent_dim=latent_dim, im_size=im_size, downsample=True, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.res_1b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=False, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.res_2a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=True, rescale=False, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.res_2b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.res_3a = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.res_3b = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.res_4a = CondResBlock(args, filters=4*filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.res_4b = CondResBlock(args, filters=4*filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, classes=2, norm=args.norm, spec_norm=args.spec_norm)
self.self_attn = Self_Attn(4 * filter_dim, self.act)
self.energy_map = nn.Linear(filter_dim*8, 1)
def init_mid_model(self):
args = self.args
filter_dim = args.filter_dim
latent_dim = args.filter_dim
im_size = args.im_size
self.mid_conv1 = nn.Conv2d(3, filter_dim, kernel_size=3, stride=1, padding=1)
self.mid_res_1a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=True, rescale=False, classes=2)
self.mid_res_1b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=False, classes=2)
self.mid_res_2a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=True, rescale=False, classes=2)
self.mid_res_2b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, classes=2)
self.mid_res_3a = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, classes=2)
self.mid_res_3b = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, classes=2)
self.mid_energy_map = nn.Linear(filter_dim*4, 1)
self.avg_pool = Downsample(channels=3)
def init_small_model(self):
args = self.args
filter_dim = args.filter_dim
latent_dim = args.filter_dim
im_size = args.im_size
self.small_conv1 = nn.Conv2d(3, filter_dim, kernel_size=3, stride=1, padding=1)
self.small_res_1a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=True, rescale=False, classes=2)
self.small_res_1b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=False, classes=2)
self.small_res_2a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=True, rescale=False, classes=2)
self.small_res_2b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, classes=2)
self.small_energy_map = nn.Linear(filter_dim*2, 1)
def main_model(self, x, latent):
x = self.act(self.conv1(x))
x = self.res_1a(x, latent)
x = self.res_1b(x, latent)
x = self.res_2a(x, latent)
x = self.res_2b(x, latent)
x = self.res_3a(x, latent)
x = self.res_3b(x, latent)
if self.args.self_attn:
x, _ = self.self_attn(x)
x = self.res_4a(x, latent)
x = self.res_4b(x, latent)
x = self.act(x)
x = x.mean(dim=2).mean(dim=2)
x = x.view(x.size(0), -1)
energy = self.energy_map(x)
if self.args.square_energy:
energy = torch.pow(energy, 2)
if self.args.sigmoid:
energy = F.sigmoid(energy)
return energy
def mid_model(self, x, latent):
x = F.avg_pool2d(x, 3, stride=2, padding=1)
x = self.act(self.mid_conv1(x))
x = self.mid_res_1a(x, latent)
x = self.mid_res_1b(x, latent)
x = self.mid_res_2a(x, latent)
x = self.mid_res_2b(x, latent)
x = self.mid_res_3a(x, latent)
x = self.mid_res_3b(x, latent)
x = self.act(x)
x = x.mean(dim=2).mean(dim=2)
x = x.view(x.size(0), -1)
energy = self.mid_energy_map(x)
if self.args.square_energy:
energy = torch.pow(energy, 2)
if self.args.sigmoid:
energy = F.sigmoid(energy)
return energy
def small_model(self, x, latent):
x = F.avg_pool2d(x, 3, stride=2, padding=1)
x = F.avg_pool2d(x, 3, stride=2, padding=1)
x = self.act(self.small_conv1(x))
x = self.small_res_1a(x, latent)
x = self.small_res_1b(x, latent)
x = self.small_res_2a(x, latent)
x = self.small_res_2b(x, latent)
x = self.act(x)
x = x.mean(dim=2).mean(dim=2)
x = x.view(x.size(0), -1)
energy = self.small_energy_map(x)
if self.args.square_energy:
energy = torch.pow(energy, 2)
if self.args.sigmoid:
energy = F.sigmoid(energy)
return energy
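# Note: label_map references map_fc1..map_fc4, which are never defined in this class, and
# forward() below never calls it; it appears to be a leftover from a conditional variant of the model.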
def label_map(self, latent):
x = self.act(self.map_fc1(latent))
x = self.act(self.map_fc2(x))
x = self.act(self.map_fc3(x))
x = self.act(self.map_fc4(x))
return x
def forward(self, x, latent):
args = self.args
if not self.cond:
latent = None
energy = self.main_model(x, latent)
if args.multiscale:
large_energy = energy
mid_energy = self.mid_model(x, latent)
small_energy = self.small_model(x, latent)
energy = torch.cat([small_energy, mid_energy, large_energy], dim=-1)
return energy
###Output
_____no_output_____
###Markdown
ResNet Model
###Code
class ResNetModel(nn.Module):
def __init__(self, args):
super(ResNetModel, self).__init__()
self.act = swish
self.args = args
self.spec_norm = args.spec_norm
self.norm = args.norm
self.init_main_model()
if args.multiscale:
self.init_mid_model()
self.init_small_model()
self.relu = torch.nn.ReLU(inplace=True)
self.downsample = Downsample(channels=3)
self.cond = args.cond
def init_main_model(self):
args = self.args
filter_dim = args.filter_dim
latent_dim = args.filter_dim
im_size = args.im_size
self.conv1 = nn.Conv2d(3, filter_dim, kernel_size=3, stride=1, padding=1)
self.res_1a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.res_1b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=False, spec_norm=self.spec_norm, norm=self.norm)
self.res_2a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.res_2b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, spec_norm=self.spec_norm, norm=self.norm)
self.res_3a = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.res_3b = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, spec_norm=self.spec_norm, norm=self.norm)
self.res_4a = CondResBlock(args, filters=4*filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.res_4b = CondResBlock(args, filters=4*filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, spec_norm=self.spec_norm, norm=self.norm)
self.self_attn = Self_Attn(2 * filter_dim, self.act)
self.energy_map = nn.Linear(filter_dim*8, 1)
def init_mid_model(self):
args = self.args
filter_dim = args.filter_dim
latent_dim = args.filter_dim
im_size = args.im_size
self.mid_conv1 = nn.Conv2d(3, filter_dim, kernel_size=3, stride=1, padding=1)
self.mid_res_1a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.mid_res_1b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=False, spec_norm=self.spec_norm, norm=self.norm)
self.mid_res_2a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.mid_res_2b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, spec_norm=self.spec_norm, norm=self.norm)
self.mid_res_3a = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.mid_res_3b = CondResBlock(args, filters=2*filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, spec_norm=self.spec_norm, norm=self.norm)
self.mid_energy_map = nn.Linear(filter_dim*4, 1)
self.avg_pool = Downsample(channels=3)
def init_small_model(self):
args = self.args
filter_dim = args.filter_dim
latent_dim = args.filter_dim
im_size = args.im_size
self.small_conv1 = nn.Conv2d(3, filter_dim, kernel_size=3, stride=1, padding=1)
self.small_res_1a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.small_res_1b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=False, spec_norm=self.spec_norm, norm=self.norm)
self.small_res_2a = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, downsample=False, spec_norm=self.spec_norm, norm=self.norm)
self.small_res_2b = CondResBlock(args, filters=filter_dim, latent_dim=latent_dim, im_size=im_size, rescale=True, spec_norm=self.spec_norm, norm=self.norm)
self.small_energy_map = nn.Linear(filter_dim*2, 1)
def main_model(self, x, latent, compute_feat=False):
x = self.act(self.conv1(x))
x = self.res_1a(x, latent)
x = self.res_1b(x, latent)
x = self.res_2a(x, latent)
x = self.res_2b(x, latent)
if self.args.self_attn:
x, _ = self.self_attn(x)
x = self.res_3a(x, latent)
x = self.res_3b(x, latent)
x = self.res_4a(x, latent)
x = self.res_4b(x, latent)
x = self.act(x)
x = x.mean(dim=2).mean(dim=2)
if compute_feat:
return x
x = x.view(x.size(0), -1)
energy = self.energy_map(x)
if self.args.square_energy:
energy = torch.pow(energy, 2)
if self.args.sigmoid:
energy = F.sigmoid(energy)
return energy
def mid_model(self, x, latent):
x = F.avg_pool2d(x, 3, stride=2, padding=1)
x = self.act(self.mid_conv1(x))
x = self.mid_res_1a(x, latent)
x = self.mid_res_1b(x, latent)
x = self.mid_res_2a(x, latent)
x = self.mid_res_2b(x, latent)
x = self.mid_res_3a(x, latent)
x = self.mid_res_3b(x, latent)
x = self.act(x)
x = x.mean(dim=2).mean(dim=2)
x = x.view(x.size(0), -1)
energy = self.mid_energy_map(x)
if self.args.square_energy:
energy = torch.pow(energy, 2)
if self.args.sigmoid:
energy = F.sigmoid(energy)
return energy
def small_model(self, x, latent):
x = F.avg_pool2d(x, 3, stride=2, padding=1)
x = F.avg_pool2d(x, 3, stride=2, padding=1)
x = self.act(self.small_conv1(x))
x = self.small_res_1a(x, latent)
x = self.small_res_1b(x, latent)
x = self.small_res_2a(x, latent)
x = self.small_res_2b(x, latent)
x = self.act(x)
x = x.mean(dim=2).mean(dim=2)
x = x.view(x.size(0), -1)
energy = self.small_energy_map(x)
if self.args.square_energy:
energy = torch.pow(energy, 2)
if self.args.sigmoid:
energy = F.sigmoid(energy)
return energy
def forward(self, x, latent):
args = self.args
if self.cond:
latent = self.label_map(latent)
else:
latent = None
energy = self.main_model(x, latent)
if args.multiscale:
large_energy = energy
mid_energy = self.mid_model(x, latent)
small_energy = self.small_model(x, latent)
# Add a separate energy penalizing the different energies from each model
energy = torch.cat([small_energy, mid_energy, large_energy], dim=-1)
return energy
def compute_feat(self, x, latent):
return self.main_model(x, None, compute_feat=True)
###Output
_____no_output_____
###Markdown
Replay Buffer
###Code
class GaussianBlur(object):
def __init__(self, min=0.1, max=2.0, kernel_size=9):
self.min = min
self.max = max
self.kernel_size = kernel_size
def __call__(self, sample):
sample = np.array(sample)
# blur the image with a 50% chance
prob = np.random.random_sample()
if prob < 0.5:
sigma = (self.max - self.min) * np.random.random_sample() + self.min
sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)
return sample
class ReplayBuffer(object):
def __init__(self, size, transform, dataset):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
self.gaussian_blur = GaussianBlur()
def get_color_distortion(s=1.0):
# s is the strength of color distortion.
color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.4*s)
rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
rnd_gray = transforms.RandomGrayscale(p=0.2)
color_distort = transforms.Compose([
rnd_color_jitter,
rnd_gray])
return color_distort
color_transform = get_color_distortion()
if dataset in ("cifar10", "celeba", "cats"):
im_size = 32
elif dataset == "continual":
im_size = 64
elif dataset == "celebahq":
im_size = 128
elif dataset == "object":
im_size = 128
elif dataset == "mnist":
im_size = 28
elif dataset == "moving_mnist":
im_size = 28
elif dataset == "imagenet":
im_size = 128
elif dataset == "lsun":
im_size = 128
else:
assert False
self.dataset = dataset
if transform:
if dataset in ("cifar10", "celeba", "cats"):
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, transforms.ToTensor()])
elif dataset == "continual":
color_transform = get_color_distortion(0.1)
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.7, 1.0)), color_transform, transforms.ToTensor()])
elif dataset == "celebahq":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, transforms.ToTensor()])
elif dataset == "imagenet":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.01, 1.0)), transforms.RandomHorizontalFlip(), color_transform, transforms.ToTensor()])
elif dataset == "object":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.01, 1.0)), transforms.RandomHorizontalFlip(), color_transform, transforms.ToTensor()])
elif dataset == "lsun":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, transforms.ToTensor()])
elif dataset == "mnist":
self.transform = None
elif dataset == "moving_mnist":
self.transform = None
else:
assert False
else:
self.transform = None
def __len__(self):
return len(self._storage)
def add(self, ims):
batch_size = ims.shape[0]
if self._next_idx >= len(self._storage):
self._storage.extend(list(ims))
else:
if batch_size + self._next_idx < self._maxsize:
self._storage[self._next_idx:self._next_idx +
batch_size] = list(ims)
else:
split_idx = self._maxsize - self._next_idx
self._storage[self._next_idx:] = list(ims)[:split_idx]
self._storage[:batch_size - split_idx] = list(ims)[split_idx:]
self._next_idx = (self._next_idx + ims.shape[0]) % self._maxsize
def _encode_sample(self, idxes, no_transform=False, downsample=False):
ims = []
for i in idxes:
im = self._storage[i]
if self.dataset != "mnist":
if (self.transform is not None) and (not no_transform):
im = im.transpose((1, 2, 0))
im = np.array(self.transform(Image.fromarray(np.array(im))))
# if downsample and (self.dataset in ["celeba", "object", "imagenet"]):
# im = im[:, ::4, ::4]
im = im * 255
ims.append(im)
return np.array(ims)
def sample(self, batch_size, no_transform=False, downsample=False):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1)
for _ in range(batch_size)]
return self._encode_sample(idxes, no_transform=no_transform, downsample=downsample), idxes
def set_elms(self, data, idxes):
if len(self._storage) < self._maxsize:
self.add(data)
else:
for i, ix in enumerate(idxes):
self._storage[ix] = data[i]
class ReservoirBuffer(object):
def __init__(self, size, transform, dataset):
"""Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
self.n = 0
def get_color_distortion(s=1.0):
# s is the strength of color distortion.
color_jitter = transforms.ColorJitter(0.8*s, 0.8*s, 0.8*s, 0.4*s)
rnd_color_jitter = transforms.RandomApply([color_jitter], p=0.8)
rnd_gray = transforms.RandomGrayscale(p=0.2)
color_distort = transforms.Compose([
rnd_color_jitter,
rnd_gray])
return color_distort
if dataset in ("cifar10", "celeba", "cats"):
im_size = 32
elif dataset == "continual":
im_size = 64
elif dataset == "celeba":
im_size = 128
elif dataset == "object":
im_size = 128
elif dataset == "mnist":
im_size = 28
elif dataset == "moving_mnist":
im_size = 28
elif dataset == "imagenet":
im_size = 128
elif dataset == "lsun":
im_size = 128
elif dataset == "stl":
im_size = 48
else:
assert False
color_transform = get_color_distortion(0.5)
self.dataset = dataset
if transform:
if dataset in ("cifar10", "celeba", "cats"):
color_transform = get_color_distortion(1.0)
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, transforms.ToTensor()])
# self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.03, 1.0)), transforms.RandomHorizontalFlip(), color_transform, GaussianBlur(kernel_size=5), transforms.ToTensor()])
elif dataset == "continual":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, GaussianBlur(kernel_size=5), transforms.ToTensor()])
elif dataset == "celeba":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, GaussianBlur(kernel_size=5), transforms.ToTensor()])
elif dataset == "imagenet":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.6, 1.0)), transforms.RandomHorizontalFlip(), color_transform, GaussianBlur(kernel_size=11), transforms.ToTensor()])
elif dataset == "lsun":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, GaussianBlur(kernel_size=5), transforms.ToTensor()])
elif dataset == "stl":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.04, 1.0)), transforms.RandomHorizontalFlip(), color_transform, GaussianBlur(kernel_size=11), transforms.ToTensor()])
elif dataset == "object":
self.transform = transforms.Compose([transforms.RandomResizedCrop(im_size, scale=(0.08, 1.0)), transforms.RandomHorizontalFlip(), color_transform, transforms.ToTensor()])
elif dataset == "mnist":
self.transform = None
elif dataset == "moving_mnist":
self.transform = None
else:
assert False
else:
self.transform = None
def __len__(self):
return len(self._storage)
def add(self, ims):
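# Reservoir sampling: each incoming image replaces a uniformly random stored image with
# probability len(buffer)/n, where n counts all images seen so far, so the buffer stays an
# approximately uniform sample over the whole sampling history.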
batch_size = ims.shape[0]
if self._next_idx >= len(self._storage):
self._storage.extend(list(ims))
self.n = self.n + ims.shape[0]
else:
for im in ims:
self.n = self.n + 1
ix = random.randint(0, self.n - 1)
if ix < len(self._storage):
self._storage[ix] = im
self._next_idx = (self._next_idx + ims.shape[0]) % self._maxsize
def _encode_sample(self, idxes, no_transform=False, downsample=False):
ims = []
for i in idxes:
im = self._storage[i]
if self.dataset != "mnist":
if (self.transform is not None) and (not no_transform):
im = im.transpose((1, 2, 0))
im = np.array(self.transform(Image.fromarray(im)))
im = im * 255
ims.append(im)
return np.array(ims)
def sample(self, batch_size, no_transform=False, downsample=False):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
idxes = [random.randint(0, len(self._storage) - 1)
for _ in range(batch_size)]
return self._encode_sample(idxes, no_transform=no_transform, downsample=downsample), idxes
###Output
_____no_output_____
###Markdown
Dataset
###Code
class Mnist(Dataset):
def __init__(self, train=True, rescale=1.0):
self.data = MNIST(
"data/mnist",
transform=transforms.ToTensor(),
download=True, train=train)
self.labels = np.eye(10)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
im, label = self.data[index]
label = self.labels[label]
im = im.squeeze()
im = im.numpy() / 256 * 255 + np.random.uniform(0, 1. / 256, (28, 28))
im = np.clip(im, 0, 1)
s = 28
im_corrupt = np.random.uniform(0, 1, (s, s, 1))
im = im[:, :, None]
return torch.Tensor(im_corrupt), torch.Tensor(im), label
class CelebAHQ(Dataset):
def __init__(self, cond_idx=1, filter_idx=0):
self.path = "/content/data/celebAHQ/data128x128/{:05}.jpg"
self.hq_labels = pd.read_csv(os.path.join(sample_dir, "data/celebAHQ/image_list.txt"), sep="\s+")
self.labels = pd.read_csv(os.path.join(sample_dir, "data/celebAHQ/list_attr_celeba.txt"), sep="\s+", skiprows=1)
self.cond_idx = cond_idx
self.filter_idx = filter_idx
def __len__(self):
return self.hq_labels.shape[0]
def __getitem__(self, index):
info = self.hq_labels.iloc[index]
info = self.labels.iloc[info.orig_idx]
path = self.path.format(index+1)
im = np.array(Image.open(path))
image_size = 128
# im = imresize(im, (image_size, image_size))
im = im / 256
im = im + np.random.uniform(0, 1 / 256., im.shape)
label = int(info.iloc[self.cond_idx])
if label == -1:
label = 0
label = np.eye(2)[label]
im_corrupt = np.random.uniform(
0, 1, size=(image_size, image_size, 3))
return im_corrupt, im, label
class CelebADataset(Dataset):
def __init__(
self,
FLAGS,
split='train',
augment=False,
noise=True,
rescale=1.0):
if augment:
transform_list = [
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
]
transform = transforms.Compose(transform_list)
else:
# transform = transforms.ToTensor()
transform = transforms.Compose([
# resize
transforms.Resize(32),
# center-crop
transforms.CenterCrop(32),
# to-tensor
transforms.ToTensor()
])
self.data = torchvision.datasets.CelebA(
"/content/data",
transform=transform,
split=split,
download=True)
self.one_hot_map = np.eye(10)
self.noise = noise
self.rescale = rescale
self.FLAGS = FLAGS
def __len__(self):
return len(self.data)
def __getitem__(self, index):
FLAGS = self.FLAGS
im, label = self.data[index]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im * 255 / 256
im = im * self.rescale + \
np.random.uniform(0, 1 / 256., im.shape)
# np.random.seed((index + int(time.time() * 1e7)) % 2**32)
im_corrupt = np.random.uniform(
0.0, self.rescale, (image_size, image_size, 3))
return torch.Tensor(im_corrupt), torch.Tensor(im), label
# return torch.Tensor(im), label
class Cats(Dataset):
def __init__(
self,
augment=False,
noise=True,
rescale=1.0):
if augment:
transform_list = [
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
]
transform = transforms.Compose(transform_list)
else:
# transform = transforms.ToTensor()
transform = transforms.Compose([
# resize
transforms.Resize(32),
# center-crop
transforms.CenterCrop(32),
# to-tensor
transforms.ToTensor()
])
self.data = torchvision.datasets.ImageFolder('/content/data/cats', transform = transform)
self.one_hot_map = np.eye(10)
self.noise = noise
self.rescale = rescale
def __len__(self):
return len(self.data)
def __getitem__(self, index):
im, label = self.data[index]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im * 255 / 256
im = im * self.rescale + \
np.random.uniform(0, 1 / 256., im.shape)
im_corrupt = np.random.uniform(
0.0, self.rescale, (image_size, image_size, 3))
return torch.Tensor(im_corrupt), torch.Tensor(im), label
class Cifar10(Dataset):
def __init__(
self,
FLAGS,
train=True,
full=False,
augment=False,
noise=True,
rescale=1.0):
if augment:
transform_list = [
torchvision.transforms.RandomCrop(32, padding=4),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
]
transform = transforms.Compose(transform_list)
else:
transform = transforms.ToTensor()
self.full = full
self.data = torchvision.datasets.CIFAR10(
"./data/cifar10",
transform=transform,
train=train,
download=True)
self.test_data = torchvision.datasets.CIFAR10(
"./data/cifar10",
transform=transform,
train=False,
download=True)
self.one_hot_map = np.eye(10)
self.noise = noise
self.rescale = rescale
self.FLAGS = FLAGS
def __len__(self):
if self.full:
return len(self.data) + len(self.test_data)
else:
return len(self.data)
def __getitem__(self, index):
FLAGS = self.FLAGS
if self.full:
if index >= len(self.data):
im, label = self.test_data[index - len(self.data)]
else:
im, label = self.data[index]
else:
im, label = self.data[index]
im = np.transpose(im, (1, 2, 0)).numpy()
image_size = 32
label = self.one_hot_map[label]
im = im * 255 / 256
im = im * self.rescale + \
np.random.uniform(0, 1 / 256., im.shape)
# np.random.seed((index + int(time.time() * 1e7)) % 2**32)
im_corrupt = np.random.uniform(
0.0, self.rescale, (image_size, image_size, 3))
return torch.Tensor(im_corrupt), torch.Tensor(im), label
###Output
_____no_output_____
###Markdown
Sampling
###Code
def rescale_im(image):
image = np.clip(image, 0, 1)
return (np.clip(image * 256, 0, 255)).astype(np.uint8)
def gen_image_cycsgld(label, FLAGS, model, im_neg, num_steps, sample=False):
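# Cyclical-step-size Langevin sampler (as the name suggests): within each cycle the step
# size cyc_lr follows a cosine schedule (large steps early for exploration, small steps
# late for refinement), and the injected noise is scaled by sqrt(2 * cyc_lr * T) * FLAGS.noise_scale.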
im_noise = torch.randn_like(im_neg).detach()
total=1e6
cycles=3500
sub_total = total / cycles
T = 1e-7
# noise_scale = 0.25
# total=1e6
# cycles=5000
# sub_total = total / cycles
# T = 1e-6
im_negs_samples = []
for i in range(num_steps):
im_noise.normal_()
iters = i
r_remainder = (iters % sub_total) * 1.0 / sub_total
cyc_lr = FLAGS.step_lr * 5 / 2 * (cos(pi * r_remainder) + 1)
# print("\ncyc_lr", cyc_lr)
if FLAGS.anneal:
im_neg = im_neg + 0.001 * (num_steps - i - 1) / num_steps * im_noise
else:
# im_neg = im_neg + 0.001 * im_noise
im_neg = im_neg + sqrt(2 * cyc_lr * T) * FLAGS.noise_scale * im_noise
# print("\nnoise_cyc_lr", sqrt(2 * cyc_lr * T) * noise_scale)
im_neg.requires_grad_(requires_grad=True)
energy = model.forward(im_neg, label)
if FLAGS.all_step:
im_grad = torch.autograd.grad([energy.sum()], [im_neg], create_graph=True)[0]
else:
im_grad = torch.autograd.grad([energy.sum()], [im_neg])[0]
if i == num_steps - 1:
im_neg_orig = im_neg
im_neg = im_neg - cyc_lr * im_grad
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
# Save space
n = 128
elif FLAGS.dataset == "lsun":
# Save space
n = 32
elif FLAGS.dataset == "object":
# Save space
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - cyc_lr * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
im_neg = im_neg - cyc_lr * im_grad
im_neg = im_neg.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad.detach().cpu().numpy()).mean()
def gen_image(label, FLAGS, model, im_neg, num_steps, sample=False):
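# Baseline SGLD-style sampler: a fixed-step-size (FLAGS.step_lr) gradient step on the
# energy, with a small Gaussian perturbation (annealed if FLAGS.anneal is set) added
# before every step.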
im_noise = torch.randn_like(im_neg).detach()
im_negs_samples = []
for i in range(num_steps):
im_noise.normal_()
if FLAGS.anneal:
im_neg = im_neg + 0.001 * (num_steps - i - 1) / num_steps * im_noise
else:
im_neg = im_neg + 0.001 * im_noise
im_neg.requires_grad_(requires_grad=True)
energy = model.forward(im_neg, label)
if FLAGS.all_step:
im_grad = torch.autograd.grad([energy.sum()], [im_neg], create_graph=True)[0]
else:
im_grad = torch.autograd.grad([energy.sum()], [im_neg])[0]
if i == num_steps - 1:
im_neg_orig = im_neg
im_neg = im_neg - FLAGS.step_lr * im_grad
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
# Save space
n = 128
elif FLAGS.dataset == "lsun":
# Save space
n = 32
elif FLAGS.dataset == "object":
# Save space
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
im_neg = im_neg - FLAGS.step_lr * im_grad
im_neg = im_neg.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad.detach().cpu().numpy()).mean()
def stochastic_f(energy):
return energy.detach().cpu().numpy() + 0.32*normal(size=1)
def gen_image_csgld(label, FLAGS, model, im_neg, num_steps, sample=False):
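# Contour-SGLD-style sampler: the observed energy range is split into `parts` bins, Gcum
# keeps a self-adapting estimate of the mass in each bin, and grad_mul rescales the
# gradient step according to the bin that the current (noisy) energy estimate falls into.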
im_noise = torch.randn_like(im_neg).detach()
im_negs_samples = []
parts = 100
Gcum = np.array(range(parts, 0, -1)) * 1.0 / sum(range(parts, 0, -1))
J = parts - 1
bouncy_move = 0
grad_mul = 1.
zeta = 0.75
T = 1
decay_lr = 100.0
for i in range(num_steps):
im_noise.normal_()
if FLAGS.anneal:
im_neg = im_neg + 0.001 * (num_steps - i - 1) / num_steps * im_noise
else:
im_neg = im_neg + 0.001 * im_noise
im_neg.requires_grad_(requires_grad=True)
energy = model.forward(im_neg, label)
# print("energy : ", energy)
lower_bound, upper_bound = np.min(energy.detach().cpu().numpy()) - 1, np.max(energy.detach().cpu().numpy()) + 1
partition=[lower_bound, upper_bound]
if FLAGS.all_step:
im_grad = torch.autograd.grad([energy.sum()], [im_neg], create_graph=True)[0]
else:
im_grad = torch.autograd.grad([energy.sum()], [im_neg])[0]
if i == num_steps - 1:
im_neg_orig = im_neg
im_neg = im_neg - FLAGS.step_lr * grad_mul * im_grad
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
# Save space
n = 128
elif FLAGS.dataset == "lsun":
# Save space
n = 32
elif FLAGS.dataset == "object":
# Save space
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - FLAGS.step_lr * grad_mul * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
im_neg = im_neg - FLAGS.step_lr * grad_mul * im_grad
print("\n grad_mul: ", grad_mul)
div_f = (partition[1] - partition[0]) / parts
grad_mul = 1 + zeta * T * (np.log(Gcum[J]) - np.log(Gcum[J-1])) / div_f
J = (min(max(int((stochastic_f(energy).mean() - partition[0]) / div_f + 1), 1), parts - 1))
step_size = min(decay_lr, 10./(i**0.8+100))
Gcum[:J] = Gcum[:J] + step_size * Gcum[J]**zeta * (-Gcum[:J])
Gcum[J] = Gcum[J] + step_size * Gcum[J]**zeta * (1 - Gcum[J])
Gcum[(J+1):] = Gcum[(J+1):] + step_size * Gcum[J]**zeta * (-Gcum[(J+1):])
if grad_mul < 0:
bouncy_move = bouncy_move + 1
print("\n bouncy_move : ", bouncy_move)
im_neg = im_neg.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad.detach().cpu().numpy()).mean()
def gen_image_resgld(label, FLAGS, model, im_neg, num_steps, sample=False):
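# Replica-exchange SGLD-style sampler: a low-temperature and a high-temperature chain run
# in parallel and are occasionally swapped, with a swap probability driven by the energy
# gap between the two chains (swap_rate below).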
im_noise = torch.randn_like(im_neg).detach()
T_multiply=0.9
T = 0.9
var=0.1
resgld_beta_high = im_neg
resgld_beta_low = im_neg
swaps = 0
noise_scale = sqrt(2e-6 * FLAGS.step_lr * T)
print("noise_scale : ", noise_scale)
print("noise_scale * T_multiply: ", noise_scale* T_multiply)
im_negs_samples = []
for i in range(num_steps):
im_noise.normal_()
resgld_beta_low = resgld_beta_low + noise_scale * im_noise
resgld_beta_high = resgld_beta_high + noise_scale * T_multiply * im_noise
resgld_beta_high.requires_grad_(requires_grad=True)
energy_high = model.forward(resgld_beta_high, label)
resgld_beta_low.requires_grad_(requires_grad=True)
energy_low = model.forward(resgld_beta_low, label)
im_grad_low = torch.autograd.grad([energy_low.sum()], [resgld_beta_low])[0]
im_grad_high = torch.autograd.grad([energy_high.sum()], [resgld_beta_high])[0]
if i == num_steps - 1:
im_neg_orig = resgld_beta_low
resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low
resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
# Save space
n = 128
elif FLAGS.dataset == "lsun":
# Save space
n = 32
elif FLAGS.dataset == "object":
# Save space
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
resgld_beta_low = resgld_beta_low - FLAGS.step_lr * im_grad_low
resgld_beta_high = resgld_beta_high - FLAGS.step_lr * im_grad_high * T_multiply
dT = 1 / T - 1 / (T * T_multiply)
swap_rate = torch.exp(dT * (energy_low - energy_high - dT * var))
intensity_r = 0.1
# print("swap_rate", swap_rate)
swap_rate = swap_rate.mean().item()
print("swap_rate", swap_rate)
random = np.random.uniform(0, 1)
print("random", random)
if random < intensity_r * swap_rate:
resgld_beta_high, resgld_beta_low = resgld_beta_low, resgld_beta_high
swaps += 1
print("swaps : ", swaps)
im_neg = resgld_beta_low.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad_low.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad_low.detach().cpu().numpy()).mean()
def gen_image_psgld(label, FLAGS, model, im_neg, num_steps, sample=False):
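# Preconditioned SGLD-style sampler: square_avg keeps an RMSprop-like running average
# whose square root (avg) sets the per-pixel scale of the injected noise.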
square_avg = torch.zeros_like(im_neg)
im_negs_samples = []
for i in range(num_steps):
avg = square_avg.sqrt().add_(FLAGS.eps)
im_noise = torch.normal(mean=0,std=avg)
if FLAGS.anneal:
im_neg = im_neg + 0.001 * (num_steps - i - 1) / num_steps * im_noise
else:
im_neg = im_neg + 0.001 * im_noise
im_neg.requires_grad_(requires_grad=True)
energy = model.forward(im_neg, label)
if FLAGS.all_step:
im_grad = torch.autograd.grad([energy.sum()], [im_neg], create_graph=True)[0]
else:
im_grad = torch.autograd.grad([energy.sum()], [im_neg])[0]
square_avg.mul_(FLAGS.momentum).addcmul_(1 - FLAGS.momentum, im_neg.data, im_neg.data)
if i == num_steps - 1:
im_neg_orig = im_neg
im_neg = im_neg - FLAGS.step_lr * im_grad / avg
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
# Save space
n = 128
elif FLAGS.dataset == "lsun":
# Save space
n = 32
elif FLAGS.dataset == "object":
# Save space
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
im_neg = im_neg - FLAGS.step_lr * im_grad
im_neg = im_neg.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad.detach().cpu().numpy()).mean()
def gen_image_asgld(label, FLAGS, model, im_neg, num_steps, sample=False):
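# Adaptive SGLD-style sampler: running estimates of the mean and standard deviation of
# the iterates are maintained, and the injected noise is drawn from a Gaussian with that
# mean and standard deviation instead of a fixed-scale one.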
stepsize = 0.2
noise_scale = np.sqrt(stepsize * 0.01)
im_noise = torch.randn_like(im_neg).detach() * noise_scale
im_negs_samples = []
# Intialize mean and variance to zero
mean = torch.zeros_like(im_neg.data)
std = torch.zeros_like(im_neg.data)
weight_decay = 5e-4
v_noise=0.001
momentum=0.9
eps=1e-6
for i in range(num_steps):
# im_noise.normal_()
# Getting mean,std at previous step
old_mean = mean.clone()
old_std = std.clone()
im_noise = torch.normal(mean=old_mean, std=old_std)
# updt = x_negative.data.add(v_noise,im_noise)
if FLAGS.anneal:
im_neg = im_neg + 0.001 * (num_steps - i - 1) / num_steps * im_noise
else:
im_neg = im_neg + 0.001 * im_noise
im_neg.requires_grad_(requires_grad=True)
energy = model.forward(im_neg, label)
if FLAGS.all_step:
im_grad = torch.autograd.grad([energy.sum()], [im_neg], create_graph=True)[0]
else:
im_grad = torch.autograd.grad([energy.sum()], [im_neg])[0]
# Updating mean
mean = mean.mul(momentum).add(im_neg)
# Updating std
part_var1 = im_neg.add(-old_mean)
part_var2 = im_neg.add(-mean)
new_std = torch.pow(old_std,2).mul(momentum).addcmul(1,part_var1,part_var2).add(eps)
new_std = torch.pow(torch.abs_(new_std),1/2)
std.add_(-1,std).add_(new_std)
if i == num_steps - 1:
im_neg_orig = im_neg
im_neg = im_neg - FLAGS.step_lr * im_grad
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
n = 128
elif FLAGS.dataset == "celebahq":
# Save space
n = 128
elif FLAGS.dataset == "lsun":
# Save space
n = 32
elif FLAGS.dataset == "object":
# Save space
n = 32
elif FLAGS.dataset == "mnist":
n = 128
elif FLAGS.dataset == "imagenet":
n = 32
elif FLAGS.dataset == "stl":
n = 32
im_neg_kl = im_neg_orig[:n]
if sample:
pass
else:
energy = model.forward(im_neg_kl, label)
im_grad = torch.autograd.grad([energy.sum()], [im_neg_kl], create_graph=True)[0]
im_neg_kl = im_neg_kl - FLAGS.step_lr * im_grad[:n]
im_neg_kl = torch.clamp(im_neg_kl, 0, 1)
else:
im_neg = im_neg - FLAGS.step_lr * im_grad
im_neg = im_neg.detach()
if sample:
im_negs_samples.append(im_neg)
im_neg = torch.clamp(im_neg, 0, 1)
if sample:
return im_neg, im_neg_kl, im_negs_samples, np.abs(im_grad.detach().cpu().numpy()).mean()
else:
return im_neg, im_neg_kl, np.abs(im_grad.detach().cpu().numpy()).mean()
###Output
_____no_output_____
###Markdown
Training
###Code
def test(model, logger, dataloader):
pass
def log_tensorboard(data):
writer.add_scalar("replay buffer length", data["length_replay_buffer"], data["iter"])
writer.add_scalar("repel loss", data["loss_repel"], data["iter"])
writer.add_scalar("batch loss", data["loss"], data["iter"])
writer.add_scalar("average loss", data["avg_loss"], data["iter"])
writer.add_scalar("KL mean loss", data["kl_mean"], data["iter"])
writer.add_scalar("FID", data["fid"], data["iter"])
writer.add_scalar("IS mean", data["is_mean"], data["iter"])
writer.add_scalar("IS std", data["is_std"], data["iter"])
writer.add_scalar("SSIM", data["ssim"], data["iter"])
writer.add_scalar("positive energy mean", data["e_pos"], data["iter"])
writer.add_scalar("positive energy std", data["e_pos_std"], data["iter"])
writer.add_scalar("negative energy mean", data["e_neg"], data["iter"])
writer.add_scalar("negative energy std", data["e_neg_std"], data["iter"])
writer.add_scalar("energy different", data["e_diff"], data["iter"])
writer.add_scalar("x gradient", data["x_grad"], data["iter"])
writer.add_images("positive examples", data["positive_samples"], data["iter"])
writer.add_images("negative examples", data["negative_samples"], data["iter"])
def train(model, optimizer, dataloader,logdir, resume_iter, FLAGS, best_inception):
if FLAGS.replay_batch:
if FLAGS.reservoir:
replay_buffer = ReservoirBuffer(FLAGS.buffer_size, FLAGS.transform, FLAGS.dataset)
else:
replay_buffer = ReplayBuffer(FLAGS.buffer_size, FLAGS.transform, FLAGS.dataset)
dist_sinkhorn = SamplesLoss('sinkhorn')
itr = resume_iter
im_neg = None
gd_steps = 1
optimizer.zero_grad()
num_steps = FLAGS.num_steps
for epoch in range(FLAGS.epoch_num):
print("epoch : ", epoch)
tock = time.time()
average_loss = 0.0
for data_corrupt, data, label in tqdm(dataloader):
label = label.float().to(FLAGS.gpu, non_blocking=True)
data = data.permute(0, 3, 1, 2).float().contiguous()
# Generate samples to evaluate inception score
if itr % FLAGS.save_interval == 0:
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
data_corrupt = torch.Tensor(np.random.uniform(0.0, 1.0, (128, 32, 32, 3)))
repeat = 128 // FLAGS.batch_size + 1
label = torch.cat([label] * repeat, axis=0)
label = label[:128]
elif FLAGS.dataset == "celebahq":
data_corrupt = torch.Tensor(np.random.uniform(0.0, 1.0, (data.shape[0], 128, 128, 3)))
label = label[:data.shape[0]]
data_corrupt = data_corrupt[:label.shape[0]]
elif FLAGS.dataset == "stl":
data_corrupt = torch.Tensor(np.random.uniform(0.0, 1.0, (32, 48, 48, 3)))
label = label[:32]
data_corrupt = data_corrupt[:label.shape[0]]
elif FLAGS.dataset == "lsun":
data_corrupt = torch.Tensor(np.random.uniform(0.0, 1.0, (32, 128, 128, 3)))
label = label[:32]
data_corrupt = data_corrupt[:label.shape[0]]
elif FLAGS.dataset == "imagenet":
data_corrupt = torch.Tensor(np.random.uniform(0.0, 1.0, (32, 128, 128, 3)))
label = label[:32]
data_corrupt = data_corrupt[:label.shape[0]]
elif FLAGS.dataset == "object":
data_corrupt = torch.Tensor(np.random.uniform(0.0, 1.0, (32, 128, 128, 3)))
label = label[:32]
data_corrupt = data_corrupt[:label.shape[0]]
elif FLAGS.dataset == "mnist":
data_corrupt = torch.Tensor(np.random.uniform(0.0, 1.0, (128, 28, 28, 1)))
label = label[:128]
data_corrupt = data_corrupt[:label.shape[0]]
else:
assert False
data_corrupt = torch.Tensor(data_corrupt.float()).permute(0, 3, 1, 2).float().contiguous()
data = data.to(FLAGS.gpu, non_blocking=True)
data_corrupt = data_corrupt.to(FLAGS.gpu, non_blocking=True)
if FLAGS.replay_batch and len(replay_buffer) >= FLAGS.batch_size:
replay_batch, idxs = replay_buffer.sample(data_corrupt.size(0))
replay_batch = decompress_x_mod(replay_batch)
replay_mask = (
np.random.uniform(
0,
1,
data_corrupt.size(0)) > 0.001)
data_corrupt[replay_mask] = torch.Tensor(replay_batch[replay_mask]).to(FLAGS.gpu, non_blocking=True)
else:
idxs = None
if FLAGS.sampler == "psgld":
if itr % FLAGS.save_interval == 0:
im_neg, im_neg_kl, im_samples, x_grad = gen_image_psgld(label, FLAGS, model, data_corrupt, num_steps, sample=True)
else:
im_neg, im_neg_kl, x_grad = gen_image_psgld(label, FLAGS, model, data_corrupt, num_steps)
elif FLAGS.sampler == "asgld":
if itr % FLAGS.save_interval == 0:
im_neg, im_neg_kl, im_samples, x_grad = gen_image_asgld(label, FLAGS, model, data_corrupt, num_steps, sample=True)
else:
im_neg, im_neg_kl, x_grad = gen_image_asgld(label, FLAGS, model, data_corrupt, num_steps)
elif FLAGS.sampler == "sgld":
if itr % FLAGS.save_interval == 0:
im_neg, im_neg_kl, im_samples, x_grad = gen_image(label, FLAGS, model, data_corrupt, num_steps, sample=True)
else:
im_neg, im_neg_kl, x_grad = gen_image(label, FLAGS, model, data_corrupt, num_steps)
elif FLAGS.sampler == "cycsgld":
if itr % FLAGS.save_interval == 0:
im_neg, im_neg_kl, im_samples, x_grad = gen_image_cycsgld(label, FLAGS, model, data_corrupt, num_steps, sample=True)
else:
im_neg, im_neg_kl, x_grad = gen_image_cycsgld(label, FLAGS, model, data_corrupt, num_steps)
elif FLAGS.sampler == "resgld":
if itr % FLAGS.save_interval == 0:
im_neg, im_neg_kl, im_samples, x_grad = gen_image_resgld(label, FLAGS, model, data_corrupt, num_steps, sample=True)
else:
im_neg, im_neg_kl, x_grad = gen_image_resgld(label, FLAGS, model, data_corrupt, num_steps)
elif FLAGS.sampler == "csgld":
if itr % FLAGS.save_interval == 0:
im_neg, im_neg_kl, im_samples, x_grad = gen_image_csgld(label, FLAGS, model, data_corrupt, num_steps, sample=True)
else:
im_neg, im_neg_kl, x_grad = gen_image_csgld(label, FLAGS, model, data_corrupt, num_steps)
else:
assert False
data_corrupt = None
energy_pos = model.forward(data, label[:data.size(0)])
energy_neg = model.forward(im_neg, label)
if FLAGS.replay_batch and (im_neg is not None):
replay_buffer.add(compress_x_mod(im_neg.detach().cpu().numpy()))
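# Contrastive-divergence-style objective: lower the energy of real data, raise the energy
# of the sampled negatives, and add a squared-energy penalty to keep magnitudes in check.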
loss = energy_pos.mean() - energy_neg.mean()
loss = loss + (torch.pow(energy_pos, 2).mean() + torch.pow(energy_neg, 2).mean())
if FLAGS.kl:
model.requires_grad_(False)
loss_kl = model.forward(im_neg_kl, label)
model.requires_grad_(True)
loss = loss + FLAGS.kl_coeff * loss_kl.mean()
if FLAGS.repel_im:
start = timeit.timeit()
bs = im_neg_kl.size(0)
if FLAGS.dataset in ["celebahq", "imagenet", "object", "lsun", "stl"]:
im_neg_kl = im_neg_kl[:, :, :, :].contiguous()
im_flat = torch.clamp(im_neg_kl.view(bs, -1), 0, 1)
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
if len(replay_buffer) > 1000:
compare_batch, idxs = replay_buffer.sample(100, no_transform=False)
compare_batch = decompress_x_mod(compare_batch)
compare_batch = torch.Tensor(compare_batch).to(FLAGS.gpu, non_blocking=True)
compare_flat = compare_batch.view(100, -1)
if FLAGS.entropy == 'kl':
dist_matrix = torch.norm(im_flat[:, None, :] - compare_flat[None, :, :], p=2, dim=-1)
loss_repel = torch.log(dist_matrix.min(dim=1)[0]).mean()
# loss_repel = kldiv(im_flat, compare_flat)
loss = loss - 0.3 * loss_repel
elif FLAGS.entropy == 'sinkhorn':
dist_matrix = dist_sinkhorn(im_flat, compare_flat)
loss_repel = torch.log(dist_matrix).sum()
loss = loss - 0.03 * loss_repel
else:
assert False
else:
loss_repel = torch.zeros(1)
# loss = loss - 0.3 * loss_repel
else:
if len(replay_buffer) > 1000:
compare_batch, idxs = replay_buffer.sample(100, no_transform=False, downsample=True)
compare_batch = decompress_x_mod(compare_batch)
compare_batch = torch.Tensor(compare_batch).to(FLAGS.gpu, non_blocking=True)
compare_flat = compare_batch.view(100, -1)
if FLAGS.entropy == 'kl':
dist_matrix = torch.norm(im_flat[:, None, :] - compare_flat[None, :, :], p=2, dim=-1)
loss_repel = torch.log(dist_matrix.min(dim=1)[0]).mean()
# loss_repel = kldiv(im_flat, compare_flat)
elif FLAGS.entropy == 'sinkhorn':
dist_matrix = dist_sinkhorn(im_flat, compare_flat)
loss_repel = torch.log(dist_matrix).sum()
else:
assert False
else:
loss_repel = torch.zeros(1).to(FLAGS.gpu, non_blocking=True)
if FLAGS.entropy == 'kl':
loss = loss - 0.3 * loss_repel
elif FLAGS.entropy == 'sinkhorn':
loss = loss - 0.03 * loss_repel
else:
assert False
end = timeit.timeit()
else:
loss_repel = torch.zeros(1)
else:
loss_kl = torch.zeros(1)
loss_repel = torch.zeros(1)
if FLAGS.log_grad and len(replay_buffer) > 1000:
loss_kl = loss_kl - 0.1 * loss_repel
loss_kl = loss_kl.mean()
loss_ml = energy_pos.mean() - energy_neg.mean()
loss_ml.backward(retain_graph=True)
ele = []
for param in model.parameters():
if param.grad is not None:
ele.append(torch.norm(param.grad.data))
ele = torch.stack(ele, dim=0)
ml_grad = torch.mean(ele)
model.zero_grad()
loss_kl.backward(retain_graph=True)
ele = []
for param in model.parameters():
if param.grad is not None:
ele.append(torch.norm(param.grad.data))
ele = torch.stack(ele, dim=0)
kl_grad = torch.mean(ele)
model.zero_grad()
else:
ml_grad = None
kl_grad = None
loss.backward()
clip_grad_norm_(model.parameters(), 0.5)
optimizer.step()
optimizer.zero_grad()
# ema_model(models, models_ema)
if torch.isnan(energy_pos.mean()):
assert False
if torch.abs(energy_pos.mean()) > 10.0:
assert False
average_loss += (loss - average_loss) / (itr + 1)
if itr % FLAGS.log_interval == 0:
tick = time.time()
kvs = {}
kvs['e_pos'] = energy_pos.mean().item()
kvs['e_pos_std'] = energy_pos.std().item()
kvs['e_neg'] = energy_neg.mean().item()
kvs['kl_mean'] = loss_kl.mean().item()
kvs['loss_repel'] = loss_repel.mean().item()
kvs['loss'] = loss
kvs['avg_loss'] = average_loss
kvs['e_neg_std'] = energy_neg.std().item()
kvs['e_diff'] = kvs['e_pos'] - kvs['e_neg']
# kvs['x_grad'] = np.abs(x_grad.detach().cpu().numpy()).mean()
kvs['x_grad'] = x_grad
kvs['iter'] = itr
# kvs['hmc_loss'] = hmc_loss.item()
kvs['num_steps'] = num_steps
# kvs['t_diff'] = tick - tock
kvs['positive_samples'] = data.detach()
kvs['negative_samples'] = im_neg.detach()
real = data.detach().cpu()
fake = im_neg.detach().cpu()
data = None
im_neg = None
if real.shape[1] == 1:
# print("channel 1")
real = torch.cat((real, real, real), dim=1)
fake = torch.cat((fake, fake, fake), dim=1)
real = torch.from_numpy(rescale_im(real.cpu().numpy())).to(FLAGS.gpu, non_blocking=True)
fake = torch.from_numpy(rescale_im(fake.cpu().numpy())).to(FLAGS.gpu, non_blocking=True)
# print("real shape = ", real.shape)
# print("campute IS")
inception.update(fake)
inception_mean, inception_std = inception.compute()
# print("campute FID")
fid.update(real, real=True)
fid.update(fake, real=False)
fid_val = fid.compute()
real = None
fake = None
ssim_value = 0
kvs['fid'] = fid_val.item()
kvs['is_mean'] = inception_mean.item()
kvs['is_std'] = inception_std.item()
kvs['ssim'] = ssim_value
if FLAGS.replay_batch:
kvs['length_replay_buffer'] = len(replay_buffer)
# if (ml_grad is not None):
# kvs['kl_grad'] = kl_grad
# kvs['ml_grad'] = ml_grad
log_tensorboard(kvs)
tock = tick
if itr % FLAGS.save_interval == 0 and (FLAGS.save_interval != 0):
model_path = osp.join(logdir, "model_{}.pth".format(itr))
ckpt = {'optimizer_state_dict': optimizer.state_dict(),
'FLAGS': FLAGS, 'best_inception': best_inception}
for i in range(FLAGS.ensembles):
ckpt['model_state_dict_{}'.format(i)] = model.state_dict()
# ckpt['ema_model_state_dict_{}'.format(i)] = model.state_dict()
torch.save(ckpt, model_path)
# if itr % FLAGS.log_interval == 0:
# im_samples = im_samples[::10]
# im_samples_total = torch.stack(im_samples, dim=1).detach().cpu().permute(0, 1, 3, 4, 2).numpy()
# try_im = im_neg
# orig_im = data_corrupt
# actual_im = rescale_im(data.detach().permute(0, 2, 3, 1).cpu().numpy())
# orig_im = rescale_im(orig_im.detach().permute(0, 2, 3, 1).cpu().numpy())
# try_im = rescale_im(try_im.detach().permute(0, 2, 3, 1).cpu().numpy()).squeeze()
# im_samples_total = rescale_im(im_samples_total)
# if rank_idx == 0:
# score, std = get_inception_score(list(try_im), splits=1)
# print("Inception score of {} with std of {}".format(
# score, std))
# # kvs = {}
# # kvs['inception_score'] = score
# # kvs['inception_score_std'] = std
# # logger.writekvs(kvs)
# writer.add_scalar("inception score", score, itr)
# writer.add_scalar("inception score std", std, itr)
# if score > best_inception:
# model_path = osp.join(logdir, "model_best.pth")
# torch.save(ckpt, model_path)
# best_inception = score
itr += 1
def main_single(FLAGS):
print("Values of args: ", FLAGS)
if FLAGS.dataset == "cifar10":
train_dataset = Cifar10(FLAGS)
# valid_dataset = Cifar10(FLAGS, split='valid', augment=False)
# test_dataset = Cifar10(FLAGS, split='test', augment=False)
elif FLAGS.dataset == "celeba":
train_dataset = CelebADataset(FLAGS)
# valid_dataset = CelebADataset(FLAGS, train=False, augment=False)
# test_dataset = CelebADataset(FLAGS, train=False, augment=False)
elif FLAGS.dataset == "cats":
train_dataset = Cats()
elif FLAGS.dataset == "stl":
train_dataset = STLDataset(FLAGS)
# valid_dataset = STLDataset(FLAGS, train=False)
# test_dataset = STLDataset(FLAGS, train=False)
elif FLAGS.dataset == "object":
train_dataset = ObjectDataset(FLAGS.cond_idx)
# valid_dataset = ObjectDataset(FLAGS.cond_idx)
# test_dataset = ObjectDataset(FLAGS.cond_idx)
elif FLAGS.dataset == "imagenet":
train_dataset = ImageNet()
# valid_dataset = ImageNet()
# test_dataset = ImageNet()
elif FLAGS.dataset == "mnist":
train_dataset = Mnist(train=True)
# valid_dataset = Mnist(train=False)
# test_dataset = Mnist(train=False)
elif FLAGS.dataset == "celebahq":
train_dataset = CelebAHQ(cond_idx=FLAGS.cond_idx)
# valid_dataset = CelebAHQ(cond_idx=FLAGS.cond_idx)
# test_dataset = CelebAHQ(cond_idx=FLAGS.cond_idx)
elif FLAGS.dataset == "lsun":
train_dataset = LSUNBed(cond_idx=FLAGS.cond_idx)
# valid_dataset = LSUNBed(cond_idx=FLAGS.cond_idx)
# test_dataset = LSUNBed(cond_idx=FLAGS.cond_idx)
else:
assert False
train_dataloader = DataLoader(train_dataset, num_workers=FLAGS.data_workers, batch_size=FLAGS.batch_size, shuffle=True, drop_last=True)
# valid_dataloader = DataLoader(valid_dataset, num_workers=FLAGS.data_workers, batch_size=FLAGS.batch_size, shuffle=True, drop_last=True)
# test_dataloader = DataLoader(test_dataset, num_workers=FLAGS.data_workers, batch_size=FLAGS.batch_size, shuffle=True, drop_last=True)
logdir = osp.join(sample_dir, FLAGS.exp, FLAGS.dataset)
best_inception = 0.0
if FLAGS.resume_iter != 0:
FLAGS_OLD = FLAGS
model_path = osp.join(logdir, "model_{}.pth".format(FLAGS.resume_iter))
checkpoint = torch.load(model_path)
best_inception = checkpoint['best_inception']
FLAGS = checkpoint['FLAGS']
FLAGS.resume_iter = FLAGS_OLD.resume_iter
FLAGS_OLD = None
if FLAGS.dataset in ("cifar10", "celeba", "cats"):
model_fn = ResNetModel
elif FLAGS.dataset == "stl":
model_fn = ResNetModel
elif FLAGS.dataset == "object":
model_fn = CelebAModel
elif FLAGS.dataset == "mnist":
model_fn = MNISTModel
elif FLAGS.dataset == "celebahq":
model_fn = CelebAModel
elif FLAGS.dataset == "lsun":
model_fn = CelebAModel
elif FLAGS.dataset == "imagenet":
model_fn = ImagenetModel
else:
assert False
model = model_fn(FLAGS).train()
# models_ema = model_fn(FLAGS).train()
if FLAGS.cuda:
model = model.to(FLAGS.gpu)
optimizer = Adam(model.parameters(), lr=FLAGS.lr, betas=(0.0, 0.9), eps=1e-8)
# ema_model(models, models_ema, mu=0.0)
it = FLAGS.resume_iter
if not osp.exists(logdir):
os.makedirs(logdir)
checkpoint = None
if FLAGS.resume_iter != 0:
print("FLAGS.resume_iter:",FLAGS.resume_iter)
model_path = osp.join(logdir, "model_{}.pth".format(FLAGS.resume_iter))
checkpoint = torch.load(model_path)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
for i in range(FLAGS.ensembles):
model.load_state_dict(checkpoint['model_state_dict_{}'.format(i)])
# model_ema.load_state_dict(checkpoint['ema_model_state_dict_{}'.format(i)])
print("New Values of args: ", FLAGS)
pytorch_total_params = sum([p.numel() for p in model.parameters() if p.requires_grad])
print("Number of parameters for models", pytorch_total_params)
train(model, optimizer, train_dataloader, logdir, FLAGS.resume_iter, FLAGS, best_inception)
###Output
_____no_output_____
###Markdown
Start Train
###Code
if flags.dataset == "celebahq":
!mkdir -p /content/data/celebAHQ
!unzip -qq '/content/drive/MyDrive/Colab Notebooks/improved_contrastive_divergence/data/celebAHQ/data128x128.zip' -d /content/data/celebAHQ
elif flags.dataset == "celeba":
!mkdir -p /content/data
%cd /content/drive/MyDrive/Colab Notebooks/improved_contrastive_divergence.v6
%cp -av data/celeba/ /content/data
elif flags.dataset == "cats":
!mkdir -p /content/data
%cd /content/drive/MyDrive/Colab Notebooks/improved_contrastive_divergence.v6
%cp -av data/cats/ /content/data
!unzip -qq /content/data/cats/cats-dataset.zip -d /content/data/cats
%load_ext tensorboard
%tensorboard --logdir runs
main_single(flags)
###Output
_____no_output_____ |
Python-Kaggle.ipynb | ###Markdown
[Python](https://www.kaggle.com/learn/python) Hello, Python
###Code
spam_amount = 0
print(spam_amount)
# Ordering Spam, egg, Spam, Spam, bacon and Spam (4 more servings of Spam)
spam_amount = spam_amount + 4
if spam_amount > 0:
print("But I don't want ANY spam!")
viking_song = "Spam " * spam_amount
print(viking_song)
###Output
0
But I don't want ANY spam!
Spam Spam Spam Spam
###Markdown
The `*` operator can be used to multiply two numbers (`3 * 3` evaluates to 9), but amusingly enough, we can also multiply a string by a number, to get a version that's been repeated that many times. Python offers a number of cheeky little time-saving tricks like this where operators like `*` and `+` have a different meaning depending on what kind of thing they're applied to. (The technical term for this is **operator overloading**) Numbers and arithmetic in Python
###Code
spam_amount = 0
type(spam_amount)
type(10.09)
print(5 / 2)
print(6 / 2)
print(5 // 2)
print(6 // 2)
###Output
2.5
3.0
2
3
###Markdown
Order of operationsThe arithmetic we learned in primary school has conventions about the order in which operations are evaluated. Some remember these by a mnemonic such as **PEMDAS** - Parentheses, Exponents, Multiplication/Division, Addition/Subtraction. Builtin functions for working with numbers`min` and `max` return the minimum and maximum of their arguments, respectively...
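Before those built-in functions, a quick illustration of how parentheses override the default order of operations:
```python
print(2 + 3 * 4)    # 14: multiplication binds tighter than addition
print((2 + 3) * 4)  # 20: parentheses are evaluated first
print(8 - 3 + 2)    # 7: operators of equal precedence evaluate left to right
print(8 - (3 + 2))  # 3
```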
###Code
print(min(1, 2, 3))
print(max(1, 2, 3))
###Output
1
3
###Markdown
`abs` returns the absolute value of its argument:
###Code
print(abs(32))
print(abs(-32))
###Output
32
32
###Markdown
In addition to being the names of Python's two main numerical types, `int` and `float` can also be called as functions which convert their arguments to the corresponding type:
###Code
print(float(10))
print(int(3.33))
# They can even be called on strings!
print(int('807') + 1)
###Output
10.0
3
808
###Markdown
Functions and Getting Help Getting HelpThe `help()` function is possibly the most important Python function you can learn. If you can remember how to use `help()`, you hold the key to understanding most other functions.
###Code
help(round)
###Output
Help on built-in function round in module builtins:
round(number, ndigits=None)
Round a number to a given precision in decimal digits.
The return value is an integer if ndigits is omitted or None. Otherwise
the return value has the same type as the number. ndigits may be negative.
###Markdown
Defining functions
###Code
def least_difference(a, b, c):
diff1 = abs(a - b)
diff2 = abs(b - c)
diff3 = abs(a - c)
return min(diff1, diff2, diff3)
print(
least_difference(1, 10, 100),
least_difference(1, 10, 10),
least_difference(5, 6, 7), # Python allows trailing commas in argument lists. How nice is that?
)
help(least_difference)
###Output
Help on function least_difference in module __main__:
least_difference(a, b, c)
###Markdown
Docstrings
###Code
def least_difference(a, b, c):
"""Return the smallest difference between any two numbers
among a, b and c.
>>> least_difference(1, 5, -5)
4
"""
diff1 = abs(a - b)
diff2 = abs(b - c)
diff3 = abs(a - c)
return min(diff1, diff2, diff3)
help(least_difference)
###Output
Help on function least_difference in module __main__:
least_difference(a, b, c)
Return the smallest difference between any two numbers
among a, b and c.
>>> least_difference(1, 5, -5)
4
###Markdown
Functions that don't return
###Code
def least_difference(a, b, c):
"""Return the smallest difference between any two numbers
among a, b and c.
"""
diff1 = abs(a - b)
diff2 = abs(b - c)
diff3 = abs(a - c)
min(diff1, diff2, diff3)
print(
least_difference(1, 10, 100),
least_difference(1, 10, 10),
least_difference(5, 6, 7),
)
###Output
None None None
###Markdown
A function with side effects may do something useful without returning anything. We've already seen two examples of this: `print()` and `help()` don't return anything. We only call them for their side effects (putting some text on the screen). Other examples of useful side effects include writing to a file, or modifying an input.
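For instance, here is a small sketch of the "modifying an input" kind of side effect (the names are just illustrative):
```python
def add_exclamation(words):
    # Returns nothing, but changes the list it was given
    words.append('!')

shopping = ['spam', 'eggs']
add_exclamation(shopping)
print(shopping)  # ['spam', 'eggs', '!']
```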
###Code
mystery = print()
print(mystery)
###Output
None
###Markdown
Default arguments
###Code
print(1, 2, 3, sep=' < ')
def greet(who="Colin"):
print("Hello,", who)
greet()
greet(who="Kaggle")
# (In this case, we don't need to specify the name of the argument, because it's unambiguous.)
greet("world")
###Output
Hello, Colin
Hello, Kaggle
Hello, world
###Markdown
Functions Applied to Functions
###Code
def mult_by_five(x):
return 5 * x
def call(fn, arg):
"""Call fn on arg"""
return fn(arg)
def squared_call(fn, arg):
"""Call fn on the result of calling fn on arg"""
return fn(fn(arg))
print(
call(mult_by_five, 1),
squared_call(mult_by_five, 1),
sep='\n', # '\n' is the newline character - it starts a new line
)
###Output
5
25
###Markdown
Functions that operate on other functions are called **"Higher order functions."**
###Code
def mod_5(x):
"""Return the remainder of x after dividing by 5"""
return x % 5
print(
'Which number is biggest?',
max(100, 51, 14),
'Which number is the biggest modulo 5?',
max(100, 51, 14, key=mod_5),
sep='\n',
)
###Output
Which number is biggest?
100
Which number is the biggest modulo 5?
14
###Markdown
Booleans and Conditionals BooleansPython has a type `bool` which can take on one of two values: `True` and `False`.
###Code
x = True
print(x)
print(type(x))
###Output
True
<class 'bool'>
###Markdown
Rather than putting `True` or `False` directly in our code, we usually get boolean values from **boolean operators**. These are operators that answer yes/no questions.
###Code
def can_run_for_president(age):
"""Can someone of the given age run for president in the US?"""
# The US Constitution says you must "have attained to the Age of thirty-five Years"
return age >= 35
print("Can a 19-year-old run for president?", can_run_for_president(19))
print("Can a 45-year-old run for president?", can_run_for_president(45))
# Comparisons are a little bit clever...
3.0 == 3
# But not too clever...
'3' == 3
###Output
_____no_output_____
###Markdown
Combining Boolean ValuesPython provides operators to combine boolean values using the standard concepts of "and", "or", and "not". And in fact, the corresponding Python operators use just those words: `and`, `or`, and `not`.
###Code
def can_run_for_president(age, is_natural_born_citizen):
"""Can someone of the given age and citizenship status run for president in the US?"""
# The US Constitution says you must be a natural born citizen *and* at least 35 years old
return is_natural_born_citizen and (age >= 35)
print(can_run_for_president(19, True))
print(can_run_for_president(55, False))
print(can_run_for_president(55, True))
True or True and False
###Output
_____no_output_____
###Markdown
Python has precedence rules that determine the order in which operations get evaluated in expressions like the one above. For example, `and` has a higher precedence than `or`, which is why the expression above is True. You could try to [memorize the order of precedence](https://docs.python.org/3/reference/expressions.html#operator-precedence), but a safer bet is to just use liberal parentheses. Not only does this help prevent bugs, it makes your intentions clearer to anyone who reads your code. ConditionalsWhile useful enough in their own right, booleans really start to shine when combined with conditional statements, using the keywords `if`, `elif`, and `else`.
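Before the conditional examples below, a quick illustration of the "liberal parentheses" advice, using the expression from above:
```python
print(True or True and False)    # True: `and` binds tighter, so this is True or (True and False)
print(True or (True and False))  # True: same result, but the grouping is explicit
print((True or True) and False)  # False: a different grouping gives a different answer
```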
###Code
def inspect(x):
if x == 0:
print(x, "is zero")
elif x > 0:
print(x, "is positive")
elif x < 0:
print(x, "is negative")
else:
print(x, "is unlike anything I've ever seen...")
inspect(0)
inspect(-15)
###Output
0 is zero
-15 is negative
###Markdown
Boolean conversionPython has a `bool()` function which turns things into bools.
###Code
print(bool(1)) # all numbers are treated as true, except 0
print(bool(0))
print(bool("asf")) # all strings are treated as true, except the empty string ""
print(bool(""))
# Generally empty sequences (strings, lists, and other types we've yet to see like lists and tuples)
# are "falsey" and the rest are "truthy"
###Output
True
False
True
False
###Markdown
We can use non-boolean objects in `if` conditions and other places where a boolean would be expected. Python will implicitly treat them as their corresponding boolean value:
###Code
if 0:
print(0)
elif "spam":
print("spam")
###Output
spam
###Markdown
Conditional expressions (aka 'ternary')
###Code
def quiz_message(grade):
if grade < 50:
outcome = 'failed'
else:
outcome = 'passed'
print('You', outcome, 'the quiz with a grade of', grade)
quiz_message(80)
def quiz_message(grade):
outcome = 'failed' if grade < 50 else 'passed'
print('You', outcome, 'the quiz with a grade of', grade)
quiz_message(45)
###Output
You failed the quiz with a grade of 45
|
chapter/6 CNN/CNN.ipynb | ###Markdown
LeNet
###Code
net = nn.Sequential(
nn.Conv2d(1, 6, kernel_size=5, padding=2),
nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5),
nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Flatten(),
nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
nn.Linear(120, 84), nn.Sigmoid(),
nn.Linear(84, 10))
X = torch.rand(size=(1, 1, 28, 28), dtype=torch.float32)
for layer in net:
X = layer(X)
print(layer.__class__.__name__,'output shape: \t',X.shape)
###Output
Conv2d output shape: torch.Size([1, 6, 28, 28])
Sigmoid output shape: torch.Size([1, 6, 28, 28])
AvgPool2d output shape: torch.Size([1, 6, 14, 14])
Conv2d output shape: torch.Size([1, 16, 10, 10])
Sigmoid output shape: torch.Size([1, 16, 10, 10])
AvgPool2d output shape: torch.Size([1, 16, 5, 5])
Flatten output shape: torch.Size([1, 400])
Linear output shape: torch.Size([1, 120])
Sigmoid output shape: torch.Size([1, 120])
Linear output shape: torch.Size([1, 84])
Sigmoid output shape: torch.Size([1, 84])
Linear output shape: torch.Size([1, 10])
###Markdown
AlexNet
###Code
net = nn.Sequential(
# Here we use a larger 11x11 window to capture objects.
# At the same time, a stride of 4 reduces the output height and width.
# The number of output channels is also far larger than in LeNet.
nn.Conv2d(1, 96, kernel_size=11, stride=4, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
# Shrink the convolution window; padding of 2 keeps the input and output height/width equal, and the number of output channels is increased.
nn.Conv2d(96, 256, kernel_size=5, padding=2), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
# Three consecutive convolutional layers with a smaller convolution window.
# The number of output channels keeps increasing, except in the last convolutional layer.
# After the first two of these convolutional layers, no pooling layer is used to reduce the input height and width.
nn.Conv2d(256, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2d(384, 384, kernel_size=3, padding=1), nn.ReLU(),
nn.Conv2d(384, 256, kernel_size=3, padding=1), nn.ReLU(),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Flatten(),
# Here the fully connected layers have several times as many outputs as in LeNet. Dropout layers are used to mitigate overfitting.
nn.Linear(6400, 4096), nn.ReLU(),
nn.Dropout(p=0.5),
nn.Linear(4096, 4096), nn.ReLU(),
nn.Dropout(p=0.5),
# Finally, the output layer. Since Fashion-MNIST is used here, the number of classes is 10, not the 1000 of the original paper.
nn.Linear(4096, 10))
###Output
_____no_output_____
###Markdown
BatchNorm
###Code
X = torch.rand((3, 2, 3, 3))
X
X.mean(dim=(0, 2, 3), keepdim=True).shape
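# Batch norm normalizes each channel over the (N, H, W) dimensions, which is why the mean
# above is taken over dim=(0, 2, 3). A minimal sketch of the normalization step itself
# (before the learned scale and shift; the variable names are illustrative):
bn_mean = X.mean(dim=(0, 2, 3), keepdim=True)
bn_var = ((X - bn_mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)
X_hat = (X - bn_mean) / torch.sqrt(bn_var + 1e-5)
X_hat.shape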
net = nn.Sequential(
nn.Conv2d(1, 6, kernel_size=5), nn.BatchNorm2d(6), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2),
nn.Conv2d(6, 16, kernel_size=5), nn.BatchNorm2d(16), nn.Sigmoid(),
nn.AvgPool2d(kernel_size=2, stride=2), nn.Flatten(),
nn.Linear(256, 120), nn.BatchNorm1d(120), nn.Sigmoid(),
nn.Linear(120, 84), nn.BatchNorm1d(84), nn.Sigmoid(),
nn.Linear(84, 10))
###Output
_____no_output_____ |
2a-Baselines-Occutherm.ipynb | ###Markdown
Load TCS Dataset
###Code
df_tcs = pd.read_pickle("data/occutherm/df_feature1.pkl")
del df_tcs['Participant_No'] # original dataset contains participant number
df_tcs_train = pd.read_pickle("data/occutherm/df_feature1_train.pkl")
df_tcs_test = pd.read_pickle("data/occutherm/df_feature1_test.pkl")
dataset_string = "occutherm"
# total count for instances per class: 818
print(df_tcs_train.describe())
###Output
Temperature (Fahrenheit) SkinTemperature ClothingInsulation \
count 1508.000000 1508.000000 1508.000000
mean 71.453707 85.207610 0.558176
std 6.221285 5.362427 0.198067
min 60.070000 62.986781 0.000000
25% 65.599998 81.927500 0.410000
50% 70.199997 85.376000 0.490000
75% 77.634998 88.598001 0.680000
max 85.000000 110.235782 1.070000
Height(cm) Shoulder Circumference(cm) Weight(lbs) Gender \
count 1508.000000 1508.000000 1508.000000 1508.000000
mean 169.909218 109.055637 152.835411 0.443634
std 9.215815 10.985466 30.818397 0.496978
min 151.000000 89.500000 90.000000 0.000000
25% 163.300000 101.600000 126.000000 0.000000
50% 170.000000 106.900000 146.000000 0.000000
75% 176.700000 117.000000 173.000000 1.000000
max 189.000000 132.000000 236.600000 1.000000
Temperature_outside Humidity_outside Discrete Thermal Comfort_TA
count 1508.000000 1508.000000 1508.000000
mean 49.839702 70.359284 -0.257294
std 20.873157 13.296121 0.906428
min 10.510000 33.500000 -2.000000
25% 35.540001 62.000000 -1.000000
50% 47.240002 69.199997 0.000000
75% 69.260002 79.400002 0.000000
max 91.400002 100.000000 2.000000
###Markdown
Classification models on train data (imbalanced)
###Code
acc_rdf, rdf_real_model = train_rdf(df_tcs_train, rdf_depth=fixed_depth, test_size_percentage=test_size_percentage)
print("rdf acc CV: {}".format(acc_rdf))
save_pickle(rdf_real_model, "models/" + dataset_string + "_rdf_reall_full.pkl")
save_pickle(acc_rdf, "metrics/" + dataset_string + "_rdf_reall_full_acc.pkl")
###Output
_____no_output_____
###Markdown
Variability baseline
###Code
variability_baseline_list = []
for _ in range(0, num_trials):
variability_baseline = evaluation_variability(df_tcs_train)
variability_baseline_list.append(variability_baseline)
mean_var_baseline = mean(variability_baseline_list)
print(mean_var_baseline)
save_pickle(mean_var_baseline, "metrics/" + dataset_string + "_variability_baseline.pkl")
###Output
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
Thermal Comfort: 0
Thermal Comfort: -2
Thermal Comfort: 1
Thermal Comfort: -1
Thermal Comfort: 2
52.50489290173811
###Markdown
Diversity baseline
###Code
diversity_baseline_list = []
for _ in range(0, num_trials):
diversity_baseline = evaluation_diversity(df_tcs_train, df_tcs_train, baseline=True)
diversity_baseline_list.append(diversity_baseline)
mean_diversity_baseline = mean(diversity_baseline_list)
print(mean_diversity_baseline)
save_pickle(mean_diversity_baseline, "metrics/" + dataset_string + "_diversity_baseline.pkl")
###Output
1.8966552371208951
###Markdown
Quality of the final classification
###Code
class_acc_test, class_acc_train, class_models, class_report_rdf = evaluation_classification(df_tcs_train,
df_tcs_test,
rdf_depth=fixed_depth,
depth_file_name='default',
test_size_percentage=test_size_percentage)
print(class_acc_test)
final_classification_rdf = class_acc_test[3]
save_pickle(final_classification_rdf, "metrics/" + dataset_string + "_rdf_classification_baseline.pkl")
save_pickle(class_report_rdf, "label-metrics/" + dataset_string + "_class_report_baseline_trials.pkl")
###Output
_____no_output_____ |
teaching_material/module_4/module_4_slides.ipynb | ###Markdown
Session 4: Intro to Visualization*Joachim Kahr Rasmussen* Recap (I/II)*OK, so I have a collection of data that I want to analyze. How to get my data ready for analysis?* If your data comes in different subsets:- Using `merge`: Combining through one or multiple keys- Using `concat` or `join`: Combining through index- Inner join? Outer join? Left join? Might create missings. Think about how to deal with missings or duplicates:- Missings: Should these be dropped (`.dropna()`) or imputed (`.fillna()`)?- Duplicates: What is a duplicate really? And should they be dropped (`.drop_duplicates()`)? Think about whether your data has the right shape:- Wide format or long format? Use `.stack()` or `.unstack()` Recap (II/II)*How do I learn something about specific groups in the data?* Use the *split-apply-combine* framework to make group-specific computations:- Leverages the `.groupby()` method- Allows computation of mean, standard deviation, median, etc. Flexibility of the *split-apply-combine* framework:- Can apply categories generated from multiple subcategories- Can make computations on multiple variables at the same time- Can apply multiple functions using the `.agg()` method How to get group-specific computations back onto the original dataframe?- merge on keys?- `.transform()` Overview of Session 4Today, we will work with how one can do plotting in Python. In particular, we will cover:1. Understanding Plotting (live) - Why we plot - Why are you plotting? - How should you plot?2. Plotting in Python: Packages and Grammar (live) - Intro to `matplotlib` and `seaborn` - The "Grammar of Graphics"3.
Plotting the Tips Data (video + notebook) - Plots for one variable - Numeric data - Categorical - Plots for two or more variables - Numeric data - Mixed numeric and categorical data - Advanced exploratory plotting Associated Readings Wickham (2010), sections 1-3- Fundamentals of plotting- "Grammar of Graphics" PDA, chapter 9:- Basic syntax and fundamental concepts with matplotlib- Combining matplotlib with pandas and using the seaborn package Moffitt (2017):- Strengths and weaknesses of matplotlib- Intro to `figure` and `axes`- Using functions in order to improve formatting Understanding Plotting *What are we plotting?* In the last sessions, we worked with generating, cleaning and making operations on data using pandas.- When we plot, we essentially want to make a *visual* and *digestible* representation of these data. *What are some guidelines on making plots in **general**?* Be aware of *what* you plot- numerical vs. non-numeric (categorical)- raw data vs. model results vs. both (be clear!) Why We PlotSomeone should gain something from the plot...An English adage> A picture is worth a thousand wordsIs that always the case? What Values Do A, B, C and D Have? The Shocking Answer Why Are You Plotting?*Who's the audience?* You / your team: - **Exploratory** plots: Figures for understanding data - Quick to produce $\sim$ minimal polishing - Interesting feature may be implied by the producer - Be careful showing these out of context Others: - **Explanatory** plots: Figures to convey a message - Polished figures - Direct attention to interesting feature in the data - Minimize risk of misunderstanding How Should You Plot?*What are some tips for making **explanatory** plots in a report?* ***(Exam relevant!)*** - Clear narratives - should convey key point(s) - If you want to show differences between groups in the data, make sure it is easy to distinguish them. - Self-explanatory - Contain axis labels, title, and footnotes with the relevant information. - Nice appearance - Choose the right plot type. - Make sure font type, size, colors, line width work together. - Keep simplicity. - Anything unnecessary should be removed, see [this post](https://www.darkhorseanalytics.com/blog/data-looks-better-naked/). *Some practical pieces of advice on making **explanatory** plots?* 1. Try out a few plot types, using exploratory analysis - use what works.1. Apply the *layered grammar of graphics*. - Start with an empty canvas - Fill in the necessary things (axis, ticks, bars/lines, labels) Plotting in Python: Packages and Grammar How Are You Plotting?There are two overall approaches to plotting:- make a fast, decent figure - iteratively adjust if necessary - start out in `seaborn`, continue to `matplotlib`- from empty canvas to figure - iteratively add material and layers - performed in `matplotlib` Packages for Python Plotting (I/II)*What is the fundamental tool for making plots in Python?* **Matplotlib** is the fundamental plotting module- Can make almost any 2d plot.- Can build publication-ready figures.- Caveat: - requires time-consuming customization (a bit like Excel, but with a script!); - requires practice. Packages for Python Plotting (II/II)*What are good tools for fast, exploratory plots?* `seaborn` has built-in capabilities to make plots- Analyzing data, e.g.
splitting by subsets- Make interpolation of data to smooth out noise.`pandas` can easily convert Series and DataFrames to plots (you just tried that) Videos and ExercisesNow proceed to the notebook with videos and exercises, where you will first learn more about the structure of a good plot. Then we proceed and go through a lot of different plot types for exploratory and explanatory plotting.The structure of the notebook is as follows:1. The Layered Grammar of Graphics2. Plotting One Variable (Exploratory Plotting)3. Plotting Multiple Variables - Plotting Two Numeric Variables - Plotting Mixed Variables (Numeric and Categorical) - Exercises with the Data from Seaborn4. Final Pieces of Advice on Plotting VIDEO 4.1: The Grammar of Graphics Loading stuff
###Code
# Loading libraries
import numpy as np
import pandas as pd # For data structuring
import matplotlib.pyplot as plt # For plotting
import seaborn as sns # Add-on toolkit for plt
# allow printing in notebook
%matplotlib inline
# Ignore some annoying warnings
import warnings
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Matplotlib and the Grammar of Graphics (I/IV)*Where do I start when making a plot?* We will begin with the fundamental and flexible way. We start with our plotting canvas.
###Code
fig, ax = plt.subplots(figsize = (7, 3)) # create placeholder for plot
###Output
_____no_output_____
###Markdown
`fig` and `ax` are interrelated, but it is important to distinguish the two from each other:- `ax` contains most of the chart content as objects: - grid axes, labels, shapes we draw etc.- `fig` is the actual figure which is displayed (export to pdf etc.) Matplotlib and the Grammar of Graphics (II/IV)*Is there a way to change the fundamental style of the plot?* Yes, you can set a plotting style. Usually, however, you will not set this explicitly. There are lots of styles...
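Before turning to the styles, a small sketch of that `fig`/`ax` split (the file name is just an example):
```python
fig, ax = plt.subplots(figsize=(7, 3))
ax.set_title('Set on the ax: chart content')  # axes-level: titles, labels, drawn shapes
fig.savefig('my_canvas.png')                  # figure-level: exporting the whole canvas
```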
###Code
print(plt.style.available)
###Output
['Solarize_Light2', '_classic_test_patch', 'bmh', 'bright', 'classic', 'dark_background', 'fast', 'fivethirtyeight', 'ggplot', 'grayscale', 'grid', 'high-contrast', 'high-vis', 'ieee', 'light', 'muted', 'no-latex', 'notebook', 'pgf', 'retro', 'scatter', 'science', 'seaborn', 'seaborn-bright', 'seaborn-colorblind', 'seaborn-dark', 'seaborn-dark-palette', 'seaborn-darkgrid', 'seaborn-deep', 'seaborn-muted', 'seaborn-notebook', 'seaborn-paper', 'seaborn-pastel', 'seaborn-poster', 'seaborn-talk', 'seaborn-ticks', 'seaborn-white', 'seaborn-whitegrid', 'std-colors', 'tableau-colorblind10', 'vibrant']
###Markdown
Can recommend `ggplot` or simply `default` as styles. Matplotlib and the Grammar of Graphics (III/IV)*Are there any other defaults that can be changed?* A lot. With `plt.rc()`, we can change all sorts of default plotting styles. Consider the following:
###Code
plt.style.use('default') # set style (colors, background, size, gridlines etc.) # ggplot, default
plt.rc('figure', figsize=(6, 3)) # set default size of plots
font_options = {'family' : 'monospace', # define default font options
'weight' : 'bold',
'size' : 12}
plt.rc('font', **font_options) # set default font options
###Output
_____no_output_____
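###Markdown
A small aside (an addition for reference, not part of the original walkthrough): if you ever want to undo these global changes within the same session, matplotlib can restore its built-in defaults with `plt.rcdefaults()`. It is left commented out below so that the notebook's styling stays as set above.
###Code
# Restore matplotlib's built-in default rc settings (undoes the plt.rc / style changes above)
# plt.rcdefaults()
###Output
_____no_output_____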
###Markdown
Matplotlib and the Grammar of Graphics (IV/IV)*Now, let's take a look at our canvas*
###Code
fig, ax = plt.subplots() # recreate placeholder for plot
###Output
_____no_output_____
###Markdown
Plotting Something on Our Canvas (I/IV)Now, we want to plot something on our canvas! Luckily, Seaborn comes with some illustrative datasets. We load `tips` and explore it a bit...
###Code
tips = sns.load_dataset('tips')
print('Number of rows:',len(tips),'\n')
print(tips.head(5))
###Output
Number of rows: 244
total_bill tip sex smoker day time size
0 16.99 1.01 Female No Sun Dinner 2
1 10.34 1.66 Male No Sun Dinner 3
2 21.01 3.50 Male No Sun Dinner 3
3 23.68 3.31 Male No Sun Dinner 2
4 24.59 3.61 Female No Sun Dinner 4
###Markdown
Plotting Something on Our Canvas (II/IV)We will now draw plots of the tips data on the canvas. Let's plot the *numeric* variable `total_bill`:
###Code
tb = tips['total_bill']
fig, ax = plt.subplots()
ax.hist(tb)
ax.plot()
###Output
_____no_output_____
###Markdown
Plotting Something on Our Canvas (III/IV)Let's make some additional variable-specific customization:
###Code
props = {
'title': 'Distribution of bill size',
'xlabel': 'Total bill ($)',
'ylabel': 'Count',
'xlim': [0, 60]
}
###Output
_____no_output_____
###Markdown
Plotting Something on Our Canvas (IV/IV)And display:
###Code
fig, ax = plt.subplots()
ax.set(**props)
ax.hist(tb)
ax.plot()
###Output
_____no_output_____
###Markdown
VIDEO 4.2: Plotting One Variable The Kernel Density Plot (I/IV)Let's now try with Seaborn and no customization:
###Code
sns.distplot(tb,hist=True)
###Output
_____no_output_____
###Markdown
The Kernel Density Plot (II/IV)Quite useful, right? Let's customize this a bit too...
###Code
ax = sns.distplot(tb,hist=True)
ax.set(xlabel='Total bill ($)')
sns.despine()
###Output
_____no_output_____
###Markdown
The Kernel Density Plot (III/IV)We can also easily plot the cumulative distribution. Customization...
###Code
ax = sns.distplot(tb, hist_kws={'cumulative': True}, kde_kws={'cumulative': True})
ax.set(xlabel='Total bill ($)', ylabel='CDF')
sns.despine()
###Output
_____no_output_____
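###Markdown
One more customization worth a quick sketch (an illustrative addition using the same tips data, not part of the original notebook): splitting the density by a subgroup, here smokers vs. non-smokers, by drawing one kernel density per group on a shared axis.
###Code
# Illustrative sketch: one kernel density of total_bill per smoker group
fig, ax = plt.subplots()
for flag, grp in tips.groupby('smoker'):
    sns.kdeplot(grp['total_bill'], label=str(flag), ax=ax)
ax.set(xlabel='Total bill ($)', ylabel='Density')
ax.legend(title='Smoker')
###Output
_____no_output_____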
###Markdown
The Kernel Density Plot (IV/IV)There are still many things that we can play around with such as...- Thickness?- Color? - Showing raw data in different bins? - Subgroups? (exercise)Try and play around with this when you have time! Plotting One Categorical Variable (I/II)Before, we plotted the *distribution* of a *numeric* variable. Suppose we have data on gender. What does the distribution look like in the data?Pie chart? Unfortunately, not possible with Seaborn...
###Code
sns.countplot(x='sex', data=tips)
###Output
_____no_output_____
###Markdown
Plotting One Categorical Variable (II/II)That was not very informative. You might as well just present the actual numbers.Luckily, this works for `matplotlib`:
###Code
sizes = tips.groupby('sex')['sex'].count() # Get size of different groups
fig, ax = plt.subplots()
ax.pie(sizes, labels=['Male', 'Female'], autopct='%1.2f%%') # Make pie representation
plt.show()
###Output
_____no_output_____
###Markdown
Wrapping Up on Tools Thus FarHow did our tools perform? - Matplotlib is good for customization (explanatory plots)- Seaborn and Pandas are good for quick and dirty plots (exploratory)Certain things are cumbersome in one package and straightforward in another! VIDEO 4.3: Plotting Two Numeric Variables Two Numeric Variables (I/IX)*Now, how do we plot two numeric variables?* If we do not have too many observations, we can make a point cloud, i.e. a scatter plot.
###Code
fig, ax = plt.subplots(figsize=(10, 3))
ax.scatter(x=tips['total_bill'], y=tips['tip'])
ax.set(xlabel='Total bill ($)', ylabel='Tips ($)')
###Output
_____no_output_____
###Markdown
Two Numeric Variables (II/IX)*What happens if we do have 'too' many observations?* Simulate some data...
###Code
X = np.random.normal(0, 1, size=(2*10**4, 1))
Y = 2*X+0.5*np.random.normal(0, 1, size=(2*10**4, 1))
data = np.concatenate((Y,X),axis=1)
df= pd.DataFrame(data, columns=['Y','X'])
###Output
_____no_output_____
###Markdown
Two Numeric Variables (III/IX)... and display!
###Code
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
sizes=0
for i in range(0,2):
for j in range(0,2):
sizes=sizes+1
axes[i, j].scatter(x=df['X'][:2*10**sizes], y=df['Y'][:2*10**sizes])
plt.subplots_adjust(wspace=0.05, hspace=0.15)
###Output
_____no_output_____
###Markdown
Two Numeric Variables (IV/IX)If you insist on making a scatter plot, you can change the size of the scatter points...
###Code
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
sizes=0
for i in range(0,2):
for j in range(0,2):
sizes=sizes+1
axes[i, j].scatter(x=df['X'][:2*10**sizes], y=df['Y'][:2*10**sizes], s=10**1.5/(10**(sizes-1)))
plt.subplots_adjust(wspace=0.05, hspace=0.15)
###Output
_____no_output_____
###Markdown
Two Numeric Variables (V/IX)And you can also tweak the opacity:
###Code
fig, axes = plt.subplots(2, 2, sharex=True, sharey=True)
sizes=0
for i in range(0,2):
for j in range(0,2):
sizes=sizes+1
axes[i, j].scatter(x=df['X'][:2*10**sizes], y=df['Y'][:2*10**sizes],
s=10**1.5/(10**(sizes-1)), alpha=0.2**((sizes-1)/2))
plt.subplots_adjust(wspace=0.05, hspace=0.15)
###Output
_____no_output_____
###Markdown
Two Numeric Variables (VI/IX)*How might we alter the scatter plot?* We can interpolate the data and jointly plot the marginal and joint distribution:
###Code
ax = sns.jointplot(x='total_bill', y='tip', data=tips, kind='kde', size=3) # hex, reg, resid
ax.set_axis_labels('Total bill ($)', 'Tips ($)')
###Output
_____no_output_____
###Markdown
Two Numeric Variables (VII/IX) We can also plot the distribution with bars and hexagons for a different visual representation!
###Code
ax = sns.jointplot(x='total_bill', y='tip', data=tips, kind='hex', size=3) # kde, reg, resid
ax.set_axis_labels('Total bill ($)', 'Tips ($)')
###Output
_____no_output_____
###Markdown
Two Numeric Variables (VIII/IX)Despite being fairly slow, this can be particularly useful with moderately large data sets:
###Code
sizes=4
sns.jointplot(x=df['X'][:2*10**sizes], y=df['Y'][:2*10**sizes], kind='kde', size=4) # hex
###Output
_____no_output_____
###Markdown
Two Numeric Variables (IX/IX)*What if we want to see the linear relationship?* We use the linear model plot:
###Code
ax = sns.lmplot(x='total_bill', y='tip', data=tips, size=3, aspect=2.5)
ax.set(xlabel='Total bill ($)', ylabel='Tips ($)')
###Output
_____no_output_____
###Markdown
VIDEO 4.4: Plotting Mixed Variables Mixed: Categorical and Numeric Variables (I/VI)*How might we use categorical variables?* - We can split data and make plots based on subsets of data! Mixed: Categorical and Numeric Variables (II/VI)*Can we say anything about gender-specific tipping behavior?* - One simple way of getting an idea of the core traits of your data is to use the `catplot`
###Code
ax = sns.catplot(x="sex", y="tip", kind="swarm", data=tips, size=3)
ax.set(xlabel='Sex', ylabel='Tips ($)')
###Output
_____no_output_____
###Markdown
Mixed: Categorical and Numeric Variables (III/VI)We can add a third dimension to this...
###Code
ax = sns.catplot(x="sex", y="tip", hue="time", kind="swarm", data=tips, size=3)
ax.set(xlabel='Sex', ylabel='Tips ($)')
###Output
_____no_output_____
###Markdown
Mixed: Categorical and Numeric Variables (IV/VI)And even utilize a kernel to smooth the (conditional) distributions!
###Code
ax = sns.violinplot(x='time', y='tip', data=tips, hue='sex')
ax.set(xlabel='Time', ylabel='Tips ($)')
###Output
_____no_output_____
###Markdown
Mixed: Categorical and Numeric Variables (V/VI)We can also directly assess whether mean tipping behavior differs by time of day and sex
###Code
ax = sns.barplot(x='time', y='tip', data=tips, hue='sex')
ax.set(xlabel='Time', ylabel='Tips ($)')
###Output
_____no_output_____
###Markdown
Mixed: Categorical and Numeric Variables (VI/VI)Now, combining two continuous variables with one categorical
###Code
ax = sns.lmplot('total_bill', 'tip', hue='sex', data=tips, size=3)
ax.set(xlabel='Total Bill ($)', ylabel='Tips ($)')
###Output
_____no_output_____
###Markdown
Advanced exploratory plotting *How can we plot the relationship for more than two numeric variables?*
###Code
sns.pairplot(tips, height=2.3) # make hist and scatter for all pairs of numeric variables
###Output
_____no_output_____ |
openbb_terminal/jupyter/reports/similar_analysis.ipynb | ###Markdown
Similar companies and descriptions
###Code
from openbb_terminal.stocks.fundamental_analysis import yahoo_finance_model
df_info = yahoo_finance_model.get_info(ticker)
if "Long business summary" in df_info.index:
summary = df_info.loc["Long business summary"].values[0]
if "Website" in df_info.index:
website = df_info.loc["Website"].values[0]
if finviz_similar:
print(f"{ticker}: {website}")
print(summary)
for symbol in finviz_similar:
df_info = yahoo_finance_model.get_info(symbol)
if "Long business summary" in df_info.index:
summary = df_info.loc["Long business summary"].values[0]
if "Website" in df_info.index:
website = df_info.loc["Website"].values[0]
print("")
print(f"{symbol}: {website}")
print(summary)
###Output
_____no_output_____
###Markdown
Historical prices
###Code
import math
from openbb_terminal.stocks.comparison_analysis import yahoo_finance_view
if finviz_similar and finviz_similar != [""]:
for i in range(math.ceil(len(finviz_similar) / 4)):
yahoo_finance_view.display_historical(
similar_tickers=finviz_similar[4 * (i) : 4 * (i + 1)],
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Historical correlation
###Code
from matplotlib import pyplot as plt
if finviz_similar and finviz_similar != [""]:
plt.figure(figsize=(25, 10))
yahoo_finance_view.display_correlation(
similar_tickers=finviz_similar,
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Historical volumes
###Code
if finviz_similar and finviz_similar != [""]:
for i in range(math.ceil(len(finviz_similar) / 4)):
yahoo_finance_view.display_volume(
similar_tickers=finviz_similar[4 * (i) : 4 * (i + 1)],
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Overview
###Code
from openbb_terminal.stocks.comparison_analysis import finviz_compare_view
if finviz_similar and finviz_similar != [""]:
finviz_compare_view.screener(
similar=finviz_similar,
data_type="overview",
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Valuation
###Code
from openbb_terminal.stocks.comparison_analysis import finviz_compare_view
if finviz_similar and finviz_similar != [""]:
finviz_compare_view.screener(
similar=finviz_similar,
data_type="valuation",
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Financial
###Code
from openbb_terminal.stocks.comparison_analysis import finviz_compare_view
if finviz_similar and finviz_similar != [""]:
finviz_compare_view.screener(
similar=finviz_similar,
data_type="financial",
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Ownership
###Code
from openbb_terminal.stocks.comparison_analysis import finviz_compare_view
if finviz_similar and finviz_similar != [""]:
finviz_compare_view.screener(
similar=finviz_similar,
data_type="ownership",
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Performance
###Code
from openbb_terminal.stocks.comparison_analysis import finviz_compare_view
if finviz_similar and finviz_similar != [""]:
finviz_compare_view.screener(
similar=finviz_similar,
data_type="performance",
)
else:
print("Ticker not found in CoinGeckoAPI")
###Output
_____no_output_____
###Markdown
Technical
###Code
from openbb_terminal.stocks.comparison_analysis import finviz_compare_view
if finviz_similar and finviz_similar != [""]:
finviz_compare_view.screener(
similar=finviz_similar,
data_type="technical",
)
else:
print("Ticker not found in CoinGeckoAPI")
!jupyter nbconvert {report_name + ".ipynb"} --to html --no-input
###Output
_____no_output_____ |
notebooks/index-geojson/Index-geojson.ipynb | ###Markdown
This script creates a table mapping each coordinate to the corresponding tif image(s) in which that coordinate can be found, and will look up the catalog IDs of those records that are missing that information. Steps: 1. Load the tomnod geojson file and the tifRange file 2. Clean the list of catalog IDs into a column called 'complete_catalog_id' 3. Create a reference table for the damage points tif file
###Code
import gdal
import geopandas as gpd
import os
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
load the tomnod geojson file, TOMNOD = GEOJSON & tifRange file
###Code
tomnod = gpd.read_file("/Users/tessaschneider/Projects/Dcubed/Indexgeojson/data/digitalglobe_crowdsourcing_hurricane_harvey_20170915.geojson")
tifRange = pd.read_csv("/Users/tessaschneider/Projects/Dcubed/Indexgeojson/data/tifRange-tiles-run-1.csv", header = None, names = ['tif_id', 'minxy','maxxy'])
tomnod
###Output
_____no_output_____
###Markdown
splitting coordinates to different callable variables
###Code
tomnod_x = tomnod['geometry'].x
tomnod_y = tomnod['geometry'].y
tomnod['tomnod_x'] = tomnod_x
tomnod['tomnod_y'] = tomnod_y
###Output
_____no_output_____
###Markdown
convert the lat lng tuple into individual floats
###Code
def process_tup(tup):
return [float(ele) for ele in (tup.strip('()').split(','))]
###Output
_____no_output_____
###Markdown
get lat lng range of catalog_id (corner points), iterate over the tifs in order to get the catalog's range
###Code
tifRange['tif_id']
###Output
_____no_output_____
###Markdown
known catalogs that exist in the data set
###Code
POST_EVENT_CATALOG = ['105001000B95E200', '105001000B95E100', '1040010032211E00']
tifRange.iloc[3]['tif_id']
###Output
_____no_output_____
###Markdown
###Code
tomnod['tif_id'] = ""
for index_tomnod, row_tomnod in tomnod.iterrows():
if index_tomnod % 1 == 0:
print('tomnod row: ',index_tomnod)
for index_tif, row_tif in tifRange.iterrows():
# print(row_tif.loc['tif_id'])
# for file in os.listdir('image_tiles/'):
# if file.endswith('.tif'):
# minmax = get_range_tif('image_tiles/' + file)
# minxy = minmax[0]
# maxxy = minmax[1]
minxy = process_tup(row_tif["minxy"])
maxxy = process_tup(row_tif["maxxy"])
if minxy[0] <= row_tomnod['tomnod_x'] <= maxxy[0] \
and minxy[1] <= row_tomnod['tomnod_y'] <= maxxy[1]:
if tomnod.at[index_tomnod,'tif_id'] == "":
tomnod.at[index_tomnod,'tif_id'] = row_tif["tif_id"]
print ('yaaas')
elif tomnod.at[index_tomnod,'tif_id'] != "":
tomnod = tomnod.append(tomnod.iloc[index_tomnod], ignore_index=True)
tomnod.at[index_tomnod,'tif_id'] = row_tif["tif_id"]
# tomnod.sort_values('tif_id').head(10)
# tomnod[-tomnod['tif_id'].isnull()]
# type(tomnod.loc[10,'tif_id']) == float
tomnod[tomnod.id == '214149-59']
###Output
_____no_output_____
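###Markdown
(Aside, not part of the original workflow) The nested loop above checks every point against every tif. For a single tif, the same point-in-bounding-box test can be sketched in vectorized form, assuming `process_tup`, `tomnod` and `tifRange` are defined as above:
###Code
# Hypothetical vectorized sketch for one tif (row 0 of tifRange)
minx, miny = process_tup(tifRange.loc[0, 'minxy'])
maxx, maxy = process_tup(tifRange.loc[0, 'maxxy'])
# Boolean mask of tomnod points falling inside this tif's bounding box
inside = tomnod['tomnod_x'].between(minx, maxx) & tomnod['tomnod_y'].between(miny, maxy)
# The matching rows could then be tagged, e.g.:
# tomnod.loc[inside, 'tif_id'] = tifRange.loc[0, 'tif_id']
###Output
_____no_output_____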
###Markdown
to add a small sample set of the tomnod geojson for testing
###Code
tomnod[tomnod["tif_id"] != ""].to_file('coordinateandtif.geojson', driver="GeoJSON")
tomnod[tomnod["tif_id"] != ""]["tif_id"].to_csv("list.txt")
###Output
_____no_output_____
###Markdown
to remove the index values in the txt file list of small sample set
###Code
len(tomnod[tomnod["tif_id"] != ""]["tif_id"].unique())
###Output
_____no_output_____
###Markdown
define where to save output list of sample set by tif_id
###Code
np.savetxt("list.txt", tomnod[tomnod["tif_id"] != ""]["tif_id"].unique(), fmt = "%s")
###Output
_____no_output_____
###Markdown
###Code
tomnod.append(tomnod[tomnod.id == '214149-59'], ignore_index=True)
#tomnod.iloc[0]
# tomnod.to_csv('coordinateAndTif.csv', encoding='utf-8')
tomnod.to_file('coordinateandtif.geojson', driver="GeoJSON")
###Output
_____no_output_____
###Markdown
check output file
###Code
tomnod = gpd.read_file("/Users/tessaschneider/Projects/Dcubed/Indexgeojson/notebooks/coordinateandtif.geojson")
###Output
_____no_output_____ |
Workspace_of_272_iNat_Final_Project_(TF_GPU).ipynb | ###Markdown
###Code
import json
import requests
import os
from tqdm import tqdm
from joblib import Parallel, delayed
classif_dict = {56061: "Alliaria petiolata", 55830: "Glechoma hederacea", 130751: "Rubus dalibarda"}
# classif_dict = {205875: "Pteridium aquilinum pseudocaudatum", 210269: "Pteridium aquilinum latiusculum"}
ims_dict = {}
for id, sp in classif_dict.items():
o_url = f"https://api.inaturalist.org/v1/observations?quality_grade=research&identifications=any&place_id=any&taxon_id={id}&verifiable=true&per_page=200"
r = requests.get(o_url)
obs = r.text
try:
obs = json.loads(obs)
except:
print(id, sp)
total_results = obs["total_results"]
if total_results < 200:
pages = 1
elif total_results % 200 != 0:
pages = total_results // 200 + 1
else:
pages = total_results / 200
ims = []
for page in range(1, pages + 1):
url = f'{o_url}&page={page}'
r = requests.get(url)
obs = r.text
try:
obs = json.loads(obs)
except:
continue
try:
for r in obs['results']:
for im in r["photos"]:
ims.append(im["url"].replace("square", "large"))
except KeyError:
pass
ims_dict[sp] = ims
try:
os.makedirs(f"data/{sp}")
except FileExistsError:
pass
#min_ims = min([len(a) for a in ims_dict.values()])
min_ims = max([len(a) for a in ims_dict.values()])
for sp, ims in ims_dict.items():
def down_ims(idx, im):
if idx >= min_ims:
return
response = requests.get(im)
file = open(f"data/{sp}/{idx}.jpg", "wb")
file.write(response.content)
file.close()
Parallel(n_jobs=os.cpu_count())(delayed(down_ims)(idx, im) for idx, im in tqdm(enumerate(ims), total=min_ims))
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, GlobalAveragePooling2D
from tensorflow.keras.models import Model
import numpy as np
import os
import PIL
import PIL.Image
image_r = 331
bs = 8
model = tf.keras.applications.nasnet.NASNetLarge(
input_shape=(image_r, image_r, 3), weights='imagenet', include_top=False
)
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
# flat1 = GlobalAveragePooling2D()(model.layers[-1].output)
# output = Dense(2, activation='softmax')(flat1)
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(2048, activation='relu')(flat1)
class1 = Dense(512, activation='relu')(class1)
output = Dense(len(list(classif_dict.items())), activation='softmax')(class1)
model = Model(inputs=model.inputs, outputs=output)
train_ds = tf.keras.utils.image_dataset_from_directory(
"data/",
validation_split=0.1,
subset="training",
seed=123,
image_size=(image_r, image_r),
batch_size=bs,
label_mode='categorical')
val_ds = tf.keras.utils.image_dataset_from_directory(
"data/",
validation_split=0.1,
subset="validation",
seed=123,
image_size=(image_r, image_r),
batch_size=bs,
label_mode='categorical')
model.compile(
optimizer='sgd',
loss=tf.losses.BinaryCrossentropy(),
metrics=['accuracy'])
callback = tf.keras.callbacks.EarlyStopping(monitor='val_acc', patience=10)
mc = tf.keras.callbacks.ModelCheckpoint(
"model_cp",
monitor="val_acc",
verbose=0,
save_best_only=True,
mode="auto",
save_freq="epoch"
)
model.fit(
train_ds, validation_data=val_ds, batch_size=bs,
epochs=5,
callbacks=[],
shuffle=True
)
model.save("model")
! pip install lime
! pkill -f "python"
from lime import lime_image
from skimage.segmentation import mark_boundaries
import tensorflow as tf
from tensorflow.keras.models import load_model
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
model = load_model("model")
print(model.summary())
im = Image.open("large.jpeg")
im = im.resize((331, 331))
im = np.array(im)
im = np.array([im])
im = im / 255
print(im.shape)
print(model.predict(im))
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(im[0].astype('double'), model.predict,
top_labels=2, hide_color=0, num_samples=5000)
temp_1, mask_1 = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=True)
temp_2, mask_2 = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,15))
ax1.imshow(mark_boundaries(temp_1, mask_1))
ax2.imshow(mark_boundaries(temp_2, mask_2))
ax1.axis('off')
ax2.axis('off')
plt.show()
###Output
_____no_output_____ |
examples/Isosurface.ipynb | ###Markdown
Using dicom2stl.py to extract an iso-surface from a volume This notebook gives a basic introduction to using the `'dicom2stl.py'` script to extract an iso-surface from a volume image.
###Code
import os, sys
# download dicom2stl if it's not here already
if not os.path.isdir('dicom2stl'):
!{'git clone https://github.com/dave3d/dicom2stl.git'}
# Get the latest version
!{'cd dicom2stl; git pull'}
# Install required packages
!{sys.executable} -m pip install SimpleITK
!{sys.executable} -m pip install vtk
!{sys.executable} -m pip install itkwidgets
###Output
_____no_output_____
###Markdown
Create a test volume that is 4 Gaussian blobs arranged in a tetrahedron
###Code
from dicom2stl.tests import create_data
tetra = create_data.make_tetra()
###Output
_____no_output_____
###Markdown
Display the tetra volume using [ITK Widgets](https://github.com/InsightSoftwareConsortium/itkwidgets)
###Code
import itkwidgets
itkwidgets.view(tetra, cmap='Grayscale', vmin=100)
###Output
_____no_output_____
###Markdown
Write the tetra volume to a file
###Code
import SimpleITK as sitk
sitk.WriteImage(tetra, "tetra.nii.gz")
###Output
_____no_output_____
###Markdown
Show the command line options for dicom2stl.py
###Code
!{'./dicom2stl/dicom2stl.py -h'}
###Output
_____no_output_____
###Markdown
Extract an iso-surface from the tetra volumeThe `'-i'` flag tells the script the intensity value to use for the iso-surface, `150` in this case. The `'-o'` flag specifies the output file, `tetra.stl`. The script can output STL, VTK or PLY files. And `tetra.nii.gz` is the input volume.
###Code
!{'./dicom2stl/dicom2stl.py -i 150 -o tetra.stl tetra.nii.gz'}
###Output
_____no_output_____
###Markdown
Load the mesh
###Code
from dicom2stl.utils import vtkutils
mesh = vtkutils.readMesh('tetra.stl')
###Output
_____no_output_____
###Markdown
Display the mesh with the volume
###Code
itkwidgets.view(tetra, cmap='Grayscale', geometries=[mesh], vmin=100)
###Output
_____no_output_____ |
jupyter_notebooks/4_State_Estimation/3_Extended_Kalman_Filters/EKF/Sympy Demonstration.ipynb | ###Markdown
If you ever don't feel like taking derivatives, you can use a Python library called `sympy` to do the dirty work.When we have a $g$ function like this:$$g = \begin{bmatrix}u_{\phi} \\\dot{y} - \sin(\phi) \Delta t \\y + \dot{y} \Delta t\end{bmatrix}$$and a state vector like this:$$x = \begin{bmatrix}\phi \\\dot{y} \\y\end{bmatrix}$$(Note that I'm writing $\phi$ here instead of $x_{\phi}$. Like wise with $\dot{y}$ and $y$)we can use sympy to calculate $g'$ as follows:
###Code
import sympy

# 1. define sympy symbols
u_phi, phi, y_dot, y, dt = sympy.symbols(
'u_phi, phi, y_dot, y, dt')
# 2. define the state variable
x = sympy.Matrix([
phi,
y_dot,
y])
# 3. define state transition function
g = sympy.Matrix([
u_phi,
y_dot - sympy.sin(phi) * dt,
y + y_dot * dt
])
# 4. take jacobian of g with respect to x
g.jacobian(x)
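# For reference, the Jacobian sympy returns above works out (by hand) to
#   [[           0,  0, 0],
#    [-dt*cos(phi),  1, 0],
#    [           0, dt, 1]]
# i.e. each row is a component of g differentiated with respect to [phi, y_dot, y].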
###Output
_____no_output_____ |
CoTS_MR.ipynb | ###Markdown
Joint mark-recapture CPUE model for crown of thorns abundance estimates on the GBR One of the largest gaps in understanding crown of thorns starfish (CoTS) population dynamics is the lack of information concerning their abundance at any given point in time. Here we develop a joint Bayesian hierarchical model for estimating the detectability of CoTS adults that will be subsequently used to integrate datasets and improve CoTS population estimates in the Cairns sector of the GBR. The Bayesian hierarchical models we're going to build will be done using the [PyMC](http://pymc-devs.github.io/pymc/) package for the [Python](https://www.python.org) programming language. Both are open-source and freely accessible. Data wranglingThe first step is to import the required Python packages:
###Code
# Import packages
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy as sp
import pymc as pm
import matplotlib as mp
import seaborn as sns
from mpl_toolkits.basemap import Basemap as bm
from scipy.stats import gaussian_kde
import sqlite3
import os
###Output
_____no_output_____
###Markdown
And a few custom scripts
###Code
# Helper functions
def indexall(L):
poo = []
for p in L:
if not p in poo:
poo.append(p)
Ix = np.array([poo.index(p) for p in L])
return poo,Ix
def subindexall(short,long):
poo = []
out = []
for s,l in zip(short,long):
if not l in poo:
poo.append(l)
out.append(s)
return indexall(out)
match = lambda a, b: np.array([ b.index(x) if x in b else None for x in a ])
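# Quick illustration of what these helpers return (for reference, not run here):
#   indexall(['a', 'b', 'a']) -> (['a', 'b'], array([0, 1, 0]))
#   i.e. the unique labels in order of first appearance plus an integer index into them;
#   match(a, b) gives, for each element of a, its position in list b (or None if absent).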
###Output
_____no_output_____
###Markdown
The next step is to import the mark-recapture data:
###Code
# Import mark-recapture data
xdata = pd.read_csv('CoTS_MR_data.csv')
# Column names
xdata.columns.values
###Output
_____no_output_____
###Markdown
Looking at these data we can see a number of covariates, including:1. *id* - identifier number for individual CoTS (CAUTION: duplicated among sites)2. *dia* - the diameter of each CoTS in cm; some of which are unobserved and given the value -9993. *reef* - the individual reef surveyed4. *site* - individual site id within each reef, corresponding to a 5x50 m fixed area5. *transect* - replicate number (*k*) for each site6. *observer* - dive team member initials (2 people)7. *night* - dummy variable indicating dive done at night8. *depth* - depth of survey in m9. *hc* - percent hard coral cover within site10. *habitat* - habitat type surveyed (crest, slope etc.) Now that the data are imported we can get them into the shape we need to model them with. The first step is to get the total number of starfish observed among all reefs:
###Code
max(xdata.hc.values)
# Number of reefs*sites*id
recid = np.array([r+'_'+str(s)+'_'+str(i) for r,s,i in zip(xdata.reef.values,xdata.site.values,xdata.id.values)])
ireefsiteid = np.unique(recid)
nrsi = len(ireefsiteid)
nrsi
###Output
_____no_output_____
###Markdown
Next the maximum number of capture occasions (transects) per site:
###Code
# Number of capture occasions
K = 6
###Output
_____no_output_____
###Markdown
Next we'll need to build an empty capture history matrix for the data we have observed, which is a `T*K` matrix, where `T` is the total number of individuals observed on each unique site:
###Code
# Empty capture history array
Yobs = np.zeros(K*nrsi).reshape(nrsi,K)
###Output
_____no_output_____
###Markdown
We'll also add a couple of empty arrays to hold the transect-scale covariate observations
###Code
# Record observations made at night
obs_night = np.ones(K*nrsi).reshape(nrsi,K)*-1
# Record which observation team did each count
team = np.ones(K*nrsi).reshape(nrsi,K)*-1
# Indicate if individual had been observed previously
tag = np.ones(K*nrsi).reshape(nrsi,K)*0
# Empty individual covariates
length = np.zeros(nrsi)
idrec = []
###Output
_____no_output_____
###Markdown
Now we can fill each individual record into the capture history matrix and update the observed transect-scale covariates too:
###Code
# Fill in transects
for i in range(len(recid)):
# Index for row of capture history array
rindx = list(ireefsiteid).index(recid[i])
cindx = xdata.transect.values[i]-1
Yobs[rindx,cindx] = 1
obs_night[rindx,cindx] = xdata.night.values[i]
team[rindx,cindx] = xdata.team.values[i]
# Store individual covariates
if not rindx in idrec:
idrec.append(rindx)
length[rindx] = xdata.dia.values[i]
# Record previously tagged
for i in range(len(Yobs)):
for j in range(1,K):
tag[i,j] = 1*(Yobs[i][:j].sum()>=1)
###Output
_____no_output_____
###Markdown
And allocate each individual (row) in `Yobs` to a specific reef and site
###Code
# Reef*site for matrix data
ReefSite_x = np.array([x.split("_")[0]+"_"+x.split("_")[1] for x in ireefsiteid])
len(ReefSite_x)
###Output
_____no_output_____
###Markdown
Next update the transect-level covariates where we haven't observed them (transects for which a given CoTS wasn't observed):
###Code
# Number of reefs*sites*transects
reefsitetrans = np.array([r+'_'+str(s)+'_'+str(i) for r,s,i in
zip(xdata.reef.values,xdata.site.values,xdata.transect.values)])
ireefsitetrans = np.unique(reefsitetrans)
nrst = len(ireefsitetrans)
# Number of reefs*sites
reefsite = np.array([r+'_'+str(s) for r,s in zip(xdata.reef.values,xdata.site.values)])
ireefsite = np.unique(reefsite)
nrsite = len(ireefsite)
# Fill in transect-level night and team values
for i in range(len(ireefsitetrans)):
indx = reefsitetrans==ireefsitetrans[i]
nval = max(xdata.night.values[indx])
tval = max(xdata.team.values[indx])
rsx = ireefsitetrans[i].split("_")[0]+"_"+ireefsitetrans[i].split("_")[1]
rindx = ReefSite_x==rsx
cindx = int(ireefsitetrans[i].split("_")[2])-1
team[rindx,cindx] = tval
if nval==1:
obs_night[rindx,cindx] = 1
# Make unobserved, non-night values = day
obs_night[obs_night==-1] = 0
len(reefsite)
###Output
_____no_output_____
###Markdown
With these elements in place we have imported the data and set it into the structure needed to model it. Yet the data we have so far includes only those individuals observed and we wish to make inferences about the individuals that were present but not detected as well. Data augmentationOne increasingly common approach is to augment the observation matrix with an arbitrary number of unobserved individuals, dubbed the *parameter-expanded data-augmentation approach* (PXDA; outlined by [Dorazio & Royle 2012](http://ifasstat.ifas.ufl.edu/doraziowebsite/Publications/royle_dorazio2012.pdf)) which basically tacks on some extra zeros to the bottom of the observation matrix and allows the observed data to decide how many of the unobserved (augmented) zeros were likely to have been ones, given some form of covariate information. So the first step in the augmentation process is to decide how many additional (unobserved) CoTS should be included for each site. This number is arbitrary, but given that our best *a priori* guess about CoTS detectability is 0.8 (an informed guess from [Morgan Pratchett](http://www.coralcoe.org.au/researchers/morgan-pratchett)) and that we conducted 6 replicate transects, the probability of not seeing an individual CoTS at all seems low ($(1-0.8)^{6}<0.0001$). So we'll add 10 additional CoTS to each site, with the expectation that we've missed at most 1-2 individuals at a given site.
###Code
# Agumentation number per site
Naug = 10
# Total additional (unobserved) CoTS
Nz = Naug*nrsite
# Augmented observation matrix
Y = np.r_[Yobs, np.zeros((Nz,K))]
# Superpopulation size
M = len(Y)
M
Y[:-Nz]
###Output
_____no_output_____
###Markdown
So given that there are eight sites in total across two reefs, we've added 80 unobserved individuals to the data, for a total of 196 potential CoTS. Next we'll keep track of the observed and unobserved individuals for later use, using [numpy](http://www.numpy.org)'s [masked array](http://docs.scipy.org/doc/numpy/reference/maskedarray.html) to indicate the observed vs. unobserved records:
###Code
# Observed individuals
sighted = Y.sum(axis=1)>0
sighted_masked = np.ma.masked_equal(sighted,False)
sighted_masked
###Output
_____no_output_____
###Markdown
Later on, in the model, when this gets passed to a PyMC object it will indicate which values are observed and therefore fixed (the `True` values above) and which are unobserved and can therefore vary. The next step is to assign the augmented individuals to specific sites, and the transects to being day or night, for the various observer groups:
###Code
# Augment night array with 1/2 day and night obs
night = np.r_[obs_night, np.array([np.r_[np.zeros(K/2),np.ones(K/2)]]*Nz)]
night = night.T
# Assign augmented data to individual sites
ReefSite,Is = indexall(np.r_[ReefSite_x,np.array([[x]*Naug for x in np.unique(ReefSite_x)]).reshape(Nz,)])
nrs = len(ReefSite)
# Get hard-coral values for individual sites and zero-centre
hc = xdata.hc.values[np.array([list(reefsite).index(x) for x in ReefSite])]
hc_median = np.median(hc)
hc = hc-hc_median
# Assign observer teams to agumented data randomly
team = np.r_[team, team[np.random.choice(range(len(team)),Nz)]].astype(int).T
nteam = len(np.unique(team))
# Add zeros to augment tagged matrix
tagged = np.r_[tag, np.zeros((Nz,K))]
tagged = tagged.T
length
zip(np.array(['ID_'+str(i) for i in xrange(1,115)]),Y[:-Nz],np.array(ReefSite)[Is],hc[Is]+hc_median,length)
len(Is)
###Output
_____no_output_____
###Markdown
The unobserved CoTS also need to be provided with a size. Here again we'll use `masked_array` to indicate that we don't know the size of some of the observed CoTS and of the augmented individuals:
###Code
len(length[length==-999])+Naug
# Get mean observed length
meanlength = np.mean(length[length!=-999])
meanlength
# Individual lengths plus missing values
missing_length = np.r_[np.array(length), [-999]*Nz]
# Parameterization
length_masked = np.ma.masked_equal(missing_length, -999)
length_masked
###Output
_____no_output_____
###Markdown
Alternate survey dataAside from the mark-recapture data, abundance surveys were also conducted by AMPTO and FMP on the study sites, allowing us the opportunity to calibrate their methods to a known population size. AMPTO dataFirst looking at the AMPTO data, which is provided as per their standard catch-per-unit-effort (CPUE) format, in terms of CoTS killed per unit of search time (in minutes). The key problem with CPUE data is that it has proven to be an inconsistent index of abundance, meaning that CPUE increases or declines non-linearly with true abundance. This can be of three forms, namely:1. *hyperstability* - in which CPUE remains high as abundance declines (this is expected for clustered populations)2. *proportional* - in which CPUE declines linearly with abundance (never happens)3. *hyperdepletion* - in which CPUE declines more quickly than abundance (this is expected for dispersed populations)Determining the form of the CPUE-abundance relationship is difficult, primarily because estimating it requires independent estimates of abundance that are rarely obtained. Fortunately our study was designed to get this directly, so we can determine the form of the AMPTO CPUE-abundance relationship from the site-level mark-recapture abundance estimates and the AMPTO CPUE data. First we'll import the CPUE data:
###Code
# Import data
adata = pd.read_csv('AMPTO_MR_data.csv')
adata
###Output
_____no_output_____
###Markdown
Which includes seven CPUE observations for sites on our two observation reefs.Next we'll calculate CPUE and align these observations to the mark-recapture data:
###Code
# APMTO abundance
ampto_abund = adata.nkill.values
# APMTO log-abundance
ampto_labund = np.log(ampto_abund)
# Calculate CPUE
cpue = ampto_abund/(adata.time.values*1.)
# Calculate AMPTO density in CoTS/m2
ampto_density = ampto_abund/(50*5.)
# Get reef-site keys
ampto_RS = adata.reef.values+'_'+adata.site.values.astype(str)
# Index ampto keys to MR study keys
rindx = match(ReefSite,list(ampto_RS))
# Align to MR data
cpue = cpue[rindx]
lcpue = np.log(cpue)
ampto_density = ampto_density[rindx]
ampto_ldensity = np.log(ampto_density+1)
ampto_RS = ampto_RS[rindx]
plt.hist(lcpue)
###Output
_____no_output_____
###Markdown
As a last step for subsequent plotting we'll calculate expected CPUE values across a range of CoTS densities in a 250 m$^{2}$ area
###Code
# Prediction range
ampto_predx = np.arange(1,35)
# Prediction range
ampto_predxD = np.arange(1,35)/(5*50.)
###Output
_____no_output_____
###Markdown
FMP - RHIS dataThe Queensland Parks and Wildlife Service conducts Reef Health and Impact Surveys (RHIS) as part of the Field Management Program (FMP) surveys that count CoTS are widely distribtuted throughout the GBRWHA.The FMP surveys provide the most detailed habitat information of the various GBR-based survey methods and are extensive, making them an important component of the effort to characterise CoTS population numbers.First we will import the FMP data
###Code
# Import FMP RHIS survey data
qdata = pd.read_csv('FMP_MR_data.csv')
qdata.columns.values
###Output
_____no_output_____
###Markdown
Because the RHIS surveys consist of three, 5m radius point counts for each site there are 24 observations that we need to align to our 8 50x5 survey areas. There are a couple of ways of doing this, the most simple being to sum them within each site, which conveniently gives us about the same survey area as the transects (total area is `78.5*3=235.6` m$^2$).
###Code
# Get reef-site keys
fmp_RS = qdata.reef.values+'_'+qdata.site.values.astype(str)
# Sum FMP observations for each ReefSite from the MR data to get FMP abundance
fmp_abund = np.array([np.sum(qdata.cots.values[fmp_RS==r]) for r in ReefSite])
# FMP density in CoTS/m2
fmp_density = fmp_abund/(np.pi*5**2*3)
fmp_ldensity = np.log(fmp_density)
# Predicted density range
fmp_densx = np.log(np.arange(1,50)/250.)
###Output
_____no_output_____
###Markdown
Joint Model in PyMCPyMC provides a flexible platform to develop what would otherwise be a complicated model. Here we are anchoring the calibration of AMPTO and FMP surveys to a mark-recapture study that essentially gives us a known population to work with. The first part of the model deals with the mark-recapture component of the study, followed by the AMPTO and FMP calibration models. Mark-recapture modelThe mark-recapture model starts with a prior for average detectability across all the sites:
###Code
# Global (overall) average logit-scale detectability at zero disc width
gamma_0 = pm.Normal('Global_intercept', mu=0.0, tau=0.01, value=0.0)
###Output
_____no_output_____
###Markdown
Next the model for site-level averages, with $\gamma_{0}$ passed as their overall intercept and the percent cover of hard coral present having an effect:
###Code
# Hard coral cover effect
gamma_1 = pm.Normal('Hard_coral', mu=0.0, tau=0.001, value=0.)
# Site-level model
g_mu = pm.Lambda('site_mu', lambda g0=gamma_0, g1=gamma_1: g0+g1*hc)
# Site-level variation (assumed constant)
sigma_g0 = pm.Uniform('site_SD', lower=0, upper=100, value=1.2)
tau_g0 = pm.Lambda('tau_g0', lambda sd=sigma_g0: sd**-2)
# Site-level likelihood
a0 = pm.Normal('Site', mu=g_mu, tau=tau_g0, value=np.zeros(nrsite))
###Output
_____no_output_____
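###Markdown
In other words, the cell above says the site-level intercepts are drawn as $a_{0,j} \sim \text{Normal}(\gamma_{0} + \gamma_{1}HC_{j},\ \sigma_{g0}^{2})$, where $HC_{j}$ is the median-centred hard coral cover at site $j$ (just a restatement of the code, for reference).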
###Markdown
At the next level in the hierarchy lie covariates for each individual within a given site, which here includes only their disc width. Both for some individuals and for the augmented part of the population these were unobserved, so the first step for the individual-scale part of the model is to sample the unobserved lengths from a model of the observed lengths. This is accomplished in PyMC using a hierarchical model and the `length_masked` array created above:
###Code
# Length mean
Lmu = pm.Uniform('Lmu', lower=1, upper=300, value=20)
# Length SD
sigma_0 = pm.Uniform('sigma_0', lower=0, upper=100, value=1.2)
# Length precision
tau_0 = pm.Lambda('tau_0', lambda sd=sigma_0: sd**-2)
# Imputed lengths for augmented group
iLength = pm.Normal('iLength', mu=Lmu, tau=tau_0, value=length_masked, observed=True)
# Add factor potential to ensure positive lengths
@pm.potential
def plength(iL=iLength):
    like = 0.
    if np.any(iL <= 0):
        like += -np.inf
    return like
###Output
_____no_output_____
###Markdown
Next we'll add uninformative priors for the effects of length and the transect-scale day/night and observation team covariates on detectability:
###Code
# Prior length effect
a1 = pm.Normal('Length', mu=0.0, tau=0.001, value=0.0)
# Night effect
a2 = pm.Normal('Night', mu=0.0, tau=0.001, value=0.0)
# Tag effect
a3 = pm.Normal('Tag', mu=0.0, tau=0.001, value=0.0)
# Observer team effects
o0 = pm.Normal('Team', mu=0.0, tau=0.001, value=np.zeros(nteam-1))
obseff = pm.Lambda('Observer', lambda o0=o0: np.r_[0.0,o0])
###Output
_____no_output_____
###Markdown
And with these in place we can complete the detection component of the mark-recapture model:
###Code
# Detection model
phi = pm.Lambda('phi', lambda a0=a0[Is],a1=a1,a2=a2,a3=a3,iL=iLength,obs=obseff[team]:
pm.invlogit([a0+a1*iL+a2*night[k]+a3*tagged[k]+obs[k] for k in range(K)]), trace=False)
###Output
_____no_output_____
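###Markdown
Written out, the detection model coded above is a logistic regression for the probability that individual $i$ is detected on transect $k$: $$\text{logit}(p_{ik}) = a_{0,site(i)} + a_{1}L_{i} + a_{2}\,night_{ik} + a_{3}\,tag_{ik} + o_{team(i,k)}$$ where $L_{i}$ is the (possibly imputed) disc width and $o$ is the observer-team effect; this simply restates the `phi` node for reference.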
###Markdown
The model thus far handles the probability of being observed or not, given a few relevant covariates and that an individual CoTS is present to be observed. This brings us to the second part of the PXDA approach, which is to estimate which (if any) of the unobserved CoTS were present but undetected. This component of the model has a single parameter $\psi$, which is the probability of presence for all CoTS including those in the augmented data group, given an uninformative prior:
###Code
# P(presence) for superpopulation of individuals
psi = pm.Uniform('psi', lower=0, upper=1, value=0.2)
###Output
_____no_output_____
###Markdown
The next step is the critical one, where the latent (unobserved) occupancy state is estimated for the augmented group. Because we pass the `sighted_masked` array to the model we are able to include a stochastic node whose values are constant (1) where a CoTS has been observed, and vary (0 or 1) according to the model where they have not:
###Code
# Occupancy state for agumented group
Z = pm.Bernoulli('Z', psi, value=sighted_masked, observed=True)
###Output
_____no_output_____
###Markdown
The rest of the mark-recapture model conditions the observed data on the probability of detection and occupancy:
###Code
# Detection given presence
muY = pm.Lambda('muY', lambda Z=Z, p=phi: np.transpose(Z*p))
# Likelihood
Yi = pm.Bernoulli('Yi', p=muY, value=Y, observed=True)
###Output
_____no_output_____
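###Markdown
In equation form, the two cells above are the standard PXDA observation model: presence $z_{i} \sim \text{Bernoulli}(\psi)$ and detection given presence $y_{ik} \sim \text{Bernoulli}(z_{i}\,p_{ik})$, so an individual can only be detected ($y_{ik}=1$) if it is actually present ($z_{i}=1$).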
###Markdown
Finally a few key posterior estimates to keep track of, particularly the site-level densities that will become the baseline against which we can calibrate the AMPTO and FMP surveys:
###Code
# Posterior expected distribution
Zi = pm.Bernoulli('Zi', p=muY)
# Posterior estimate for total population size
N = pm.Lambda('N', lambda Z=Z: Z.sum())
# Posterior abundance at each site
mr_abund = pm.Lambda('MR_abund', lambda Z=Z: np.array([np.sum(Z[Is==i]) for i in xrange(nrsite)]))
# Add site labels
MRabund = [pm.Lambda('MR_abund_%s' %ReefSite[i], lambda abu=mr_abund[i]: np.sum(abu)) for i in xrange(nrsite)]
# Posterior density at each site
mr_density = pm.Lambda('MR_density', lambda abu=mr_abund: abu/(50.*5.))
mr_ldensity = pm.Lambda('MR_ldensity', lambda d=mr_density: np.log(d))
# Posterior estimate for average detectability
mu_detection = pm.Lambda('mu_detection', lambda phi=phi: np.median(phi))
mu_logit_detection = pm.Lambda('mu_logit_detection', lambda g0=gamma_0, a1=a1: g0+a1*meanlength)
# Posterior estimate for individual detectability
CoTS_detection = pm.Lambda('CoTS_detection', lambda phi=phi: phi)
###Output
_____no_output_____
###Markdown
AMPTO CPUE modelAMPTO has been conducting kill operations on starfish for a number of years in the northern part of the GBR, with divers injecting CoTS with lethal doses of bisodium sulfate or, more recently, [bile salts](http://www.ampto.com.au/cots.htm). Once an outbreaking reef has been identified, divers descend on a site and continue to inject CoTS until they can find no new individuals. During kill operations AMPTO staff record the number of CoTS killed per unit time spent underwater, making their data analogous to the notorious [catch-per-unit-effort](https://sites.google.com/a/uw.edu/most-cited-fisheries/controversies/status-from-catches) (CPUE) metrics commonly used in fisheries. However, as discussed above, much of the controversy about CPUE relates to the fact that there is rarely a reliable way to estimate the relationship between catch and abundance. With this study, however, we can model that relationship explicitly. A typical model for the relationship between CPUE and abundance is$$CPUE_{t} = qN_{t}^{\beta}$$where $q$ is a catchability coefficient, $\beta$ describes the level of hyperstability ($\beta \lt 1$) or hyperdepletion ($\beta \gt 1$) present, and $N_{t}$ is the true abundance at time $t$. In general estimating this relationship requires fishery-independent estimates of $N_{t}$ that, even when available, have notable levels of uncertainty that can greatly erode the ability to estimate $\beta$. [New Zealand guidelines](http://docs.niwa.co.nz/library/public/FAR2000-01.pdf) have suggested that with 4 to 8 observations, an abundance reduction of 50% or more is required to accurately estimate $\beta$. In this case, however, our CoTS mark-recapture study gives us values of true abundance that will be accurately and precisely estimated, making it possible to estimate $\beta$ in a reasonable way. The first step in the CPUE model is to define the priors for $\beta$ and $\sigma_{cpue}$
###Code
# Shape prior
Beta = pm.Uniform('Shape', lower=0, upper=10, value=0.5)
# CPUE error
sigma_cpue = pm.Uniform('sigma_cpue', lower=0, upper=100, value=1.2)
tau_cpue = pm.Lambda('tau_cpue', lambda sd=sigma_cpue: sd**-2)
###Output
_____no_output_____
###Markdown
Next we can grab the detection estimates from the mark-recapture model
###Code
# Detectability for each site
phi_rs = pm.Lambda('phi_rs', lambda phi=phi: phi.T.mean(1))
q = pm.Lambda('q', lambda a0=phi_rs: np.array([np.mean(a0[Is==i]) for i in xrange(nrsite)]) )
###Output
_____no_output_____
###Markdown
Then estimate the expected relationship between true abundance and CPUE
###Code
# (Potentially) non-linear relationship to true abundance
Cmu = pm.Lambda('Cmu', lambda B=Beta, N=mr_abund, q=q: np.log(q)+B*np.log(N))
###Output
_____no_output_____
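###Markdown
Note that `Cmu` is just the CPUE power-law model from above on the log scale: taking logs of $CPUE = qN^{\beta}$ gives $\log(CPUE) = \log(q) + \beta\log(N)$, which is why the likelihood below is fit to log-CPUE (`lcpue`).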
###Markdown
and pass it to the likelihood
###Code
# logNormal likelihood
Ci = pm.Normal('Ci', mu=Cmu, tau=tau_cpue, value=lcpue, observed=True)
###Output
_____no_output_____
###Markdown
Finally with the AMPTO model we'll calculate some posteriors for subsequent plotting
###Code
# Calculate expected values over plotting range
E_Ci = pm.Lambda('E_Ci', lambda B=Beta, q=mu_detection: np.log(q)+B*np.log(ampto_predx))
# Calculate predicted values over plotting range
P_Ci = pm.Normal('P_Ci', mu=E_Ci, tau=tau_cpue)
# Back calculate expected values over plotting range
B_Ci = pm.Lambda('B_Ci', lambda B=Beta, q=mu_detection, x=E_Ci: np.exp((x-np.log(q))/B) )
###Output
_____no_output_____
###Markdown
FMP estimatesWith the AMPTO data we have a lot of CPUE-based research to back up a choice of model, however for the FMP data there is little guidance about what the relationship between observed and true abundance might be, other than to correct for detectability.
###Code
# (Potential) FMP bias
FMP_bias = pm.Lambda('FMP_bias', lambda q=q, den=mr_density: fmp_density/q-den)
# Average FMP bias
Bfmp = pm.Lambda('Bfmp', lambda B=FMP_bias: sum(B)/(1.*len(B)))
###Output
_____no_output_____
###Markdown
With all these model elements in place the final step is to initialize the sampler and run the model
###Code
#M = pm.MCMC(locals(), db='sqlite', name='MRdb')
M = pm.MCMC(locals())
M.sample(1000000, 900000)
M.sample(1000000, 900000)
###Output
[-----------------100%-----------------] 1000000 of 1000000 complete in 4298.9 sec
###Markdown
ResultsSo with the model run, we can have a look at the posterior results
###Code
import datetime
dtx = str(datetime.datetime.now().ctime()).replace(" ", "_")
###Output
_____no_output_____
###Markdown
Average detectability
###Code
M.mu_detection.stats()
###Output
_____no_output_____
###Markdown
Miscellaneous parameters
###Code
# Posterior summary plot
plt.style.use('bmh')
pm.Matplot.summary_plot([M.gamma_0, M.gamma_1, M.a1, M.a2, M.a3, M.a0, M.o0, M.Beta, M.q])
###Output
_____no_output_____
###Markdown
Mark-recapture resultsRunning through these in order, first the effect of hard coral on detectability
###Code
# Plot effect of hard coral cover on detection
fig = plt.figure(figsize=(10, 5),facecolor='white')
gs = mp.gridspec.GridSpec(1,2)
# Trace
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(M.gamma_1.trace())
ax1.set_title('$\gamma_{0}$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[0,1])
ax2.hist(M.gamma_1.trace())
ax2.set_title('Hard coral',fontsize=15);
###Output
_____no_output_____
###Markdown
Next the effect of length on detectability
###Code
# Plot effect of disc width on detection
fig = plt.figure(figsize=(10, 5),facecolor='white')
gs = mp.gridspec.GridSpec(1,2)
# Trace
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(M.a1.trace())
ax1.set_title('$a_{1}$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[0,1])
ax2.hist(M.a1.trace())
ax2.set_title('Disc width',fontsize=15);
###Output
_____no_output_____
###Markdown
We can also have a look at average detectability between day and night
###Code
# Plot effect of night on detection
fig = plt.figure(figsize=(10, 5),facecolor='white')
gs = mp.gridspec.GridSpec(1,2)
# Trace
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(M.a2.trace())
ax1.set_title('$a_{2}$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[0,1])
ax2.hist(M.a2.trace())
ax2.set_title('Night',fontsize=15);
###Output
_____no_output_____
###Markdown
The effect of tagging was also evidently positive
###Code
# Plot effect of tagging on detection
fig = plt.figure(figsize=(10, 5),facecolor='white')
gs = mp.gridspec.GridSpec(1,2)
# Trace
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(M.a3.trace())
ax1.set_title('$a_{3}$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[0,1])
ax2.hist(M.a3.trace())
ax2.set_title('Tag',fontsize=15);
###Output
_____no_output_____
###Markdown
Looking next at inter-site variability, we can see that sites vary widely in terms of their average detectability
###Code
# Plot effect of site on detection
fig = plt.figure(figsize=(10, 25),facecolor='white')
gs = mp.gridspec.GridSpec(8,2)
trace_ = M.a0.trace().T
for i in range(8):
# Trace
ax1 = fig.add_subplot(gs[i,0])
ax1.plot(trace_[i])
ax1.set_title('$a_{0j}$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[i,1])
ax2.hist(trace_[i])
ax2.set_xlim(-7,2)
ax2.set_title('%s'%ReefSite[i],fontsize=15);
###Output
_____no_output_____
###Markdown
Next looking at inter-observer bias among the teams we can see there is little evidence of bias among the AIMS staff
###Code
# Plot effect of observer on detection
fig = plt.figure(figsize=(10, 15),facecolor='white')
gs = mp.gridspec.GridSpec(3,2)
trace_ = M.o0.trace().T
for i in range(2):
# Trace
ax1 = fig.add_subplot(gs[i,0])
ax1.plot(trace_[i])
ax1.set_title('$o_{0}$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[i,1])
ax2.hist(trace_[i])
#ax2.set_xlim(-7,2)
ax2.set_title('Team %s'%(i+2),fontsize=15);
###Output
_____no_output_____
###Markdown
AMPTO resultsThe next bit to look at is the relationship between AMPTO CPUE and *true* abundance, estimated from the mark-recapture model. The only parameter we estimated here is $\beta$, which describes the relationship
###Code
# Beta parameter
fig = plt.figure(figsize=(10, 5),facecolor='white')
gs = mp.gridspec.GridSpec(1,2)
# Trace
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(M.Beta.trace())
ax1.set_title('$beta$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[0,1])
ax2.hist(M.Beta.trace())
ax2.set_title('CPUE shape',fontsize=15);
###Output
_____no_output_____
###Markdown
FMP Results
###Code
pm.Matplot.summary_plot(M.FMP_bias)
###Output
_____no_output_____
###Markdown
The bias between FMP and true estimates is consistently 5% or less when detectability is accounted for, providing a decent level of agreement
###Code
# Scale to hectares
hascale = 10000
# Estimated true log-density
tdens = np.array([np.median(x) for x in M.mr_density.trace().T])
# Site-level detectability
s_detect = np.array([np.median(x) for x in M.q.trace().T])
# 1:1 line
plt.plot((0,1*hascale),(0,1*hascale))
# Plot data
plt.plot(tdens*s_detect*hascale,fmp_density*hascale,'0.20', marker='.', markersize=15, linestyle='None')
plt.xlim(0,0.12*hascale)
plt.ylim(0,0.1*hascale)
# Axis labels
plt.xlabel('True CoTS densitiy (ha)', fontsize=15)
plt.ylabel('FMP CoTS densitiy (ha)', fontsize=15)
###Output
_____no_output_____
###Markdown
Finally let's have a look at overall detectability, which is what will be subsequently applied to the RHIS
###Code
# Average detectability
fig = plt.figure(figsize=(10, 5),facecolor='white')
gs = mp.gridspec.GridSpec(1,2)
# Trace
ax1 = fig.add_subplot(gs[0,0])
ax1.plot(M.mu_detection.trace())
ax1.set_title('$\mu$',fontsize=20)
# Histogram
ax2 = fig.add_subplot(gs[0,1])
ax2.hist(M.mu_detection.trace())
ax2.set_title('Average detectability',fontsize=15);
###Output
_____no_output_____
###Markdown
Up nextWith this the analysis for the mark-recapture and calibration study is complete. The next step in the process is to use these calibration results to integrate the FMP and AMPTO results to estimate CoTS densities throughout the Cairns sector. Figure 2
###Code
## Plot parameters
from matplotlib.patches import Rectangle
import seaborn as sns
# Set up plot
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(12,12))
ax1,ax2,ax3,ax4 = axes.flat[0], axes.flat[1], axes.flat[2], axes.flat[3]
plt.figure(figsize=(10, 7))
plt.rcParams.update({'font.size': 12})
plt.style.use('bmh')
### ======================================= Nussiance parameters - a =================================== ###
# Create parameter list for plotting
parlist = [M.gamma_1, M.a1, M.a2, M.a3, M.o0]
parnom = np.array(['Hard coral', 'Disc width', 'Night', 'Tagged','Team2', 'Team3'])
# Indexing
tindx = np.array([len(np.shape(x.trace())) for x in parlist])==2
ny = np.ones(len(parlist))
ny[tindx] = np.array([len(x.trace().T) for x in np.array(parlist)[tindx]])
# Create matrix of traces
nreps = max(np.shape(parlist[0].trace()))
npar = ny.sum().astype(int)
parmat = np.zeros(shape=(npar,nreps))
count = 0
for i in range(len(parlist)):
tmp = parlist[i]
if tindx[i]:
for rec in tmp.trace().T:
parmat[count] = rec
count += 1
else:
parmat[count] = tmp.trace()
count += 1
ax1.scatter(np.percentile(parmat,50,1),range(npar), s=75, c='#101010')
ax1.plot((0,0),(0,npar),'--')
ax1.set_ylim(-0.5,npar)
ax1.set_yticklabels(['poo','Hard coral', 'Disc width', 'Night', 'Tagged','Team2', 'Team3'])
for i in range(npar):
ax1.plot((np.percentile(parmat[i],25),np.percentile(parmat[i],75)),(i,i), c='#101010',linewidth=5)
ax1.plot((np.percentile(parmat[i],2.5),np.percentile(parmat[i],97.5)),(i,i), c='#101010')
ax1.set(xlabel='Effect size',ylabel='')
ax1.annotate('a', (-1.5,6),fontsize=20, fontweight='bold')
### ======================================= Disc width - b =================================== ###
# Observed disc widths as integers
obs_len = np.round(length[length!=-999]).astype(int)
# Prediction range for marginal length effects
disc_range = np.arange(1,max(obs_len))
# Posterior HPD intercept
g0 = np.median(M.gamma_0.trace())
# Posterior length effect
a1 = M.a1.trace()
# Expected values over range
ypred = pm.invlogit(g0+np.median(a1)*disc_range)
# Uncertainty intervals
ypred_lo = pm.invlogit(g0+np.percentile(a1,2.5)*disc_range)
ypred_hi = pm.invlogit(g0+np.percentile(a1,97.5)*disc_range)
# Plot observed range box
ax2.set_xlim(0,50)
ax2.set_ylim(0,1.01)
ax2.add_patch(Rectangle((min(obs_len[obs_len>1]),0), max(obs_len)-min(obs_len[obs_len>1]), 1, facecolor="0.09",
edgecolor="none", alpha=0.2))
ax2.set_xlabel('Disc width (cm)')
ax2.set_ylabel('P(detection)')
ax2.text(27,0.05,'Observed range')
# Plot observed detection
jitt1 = pm.rnormal(0,100,size=len(obs_len[obs_len>0]))*0.
jitt2 = pm.rnormal(0,10000,size=len(obs_len[obs_len>0]))*0
ax2.scatter(obs_len[obs_len>0]+jitt1,np.array(Yobs.sum(1)/6.)[obs_len>0]+jitt2, s=50, c='#101010', alpha=0.5)
# Plot uncertainty intervals
ax2.plot(disc_range,ypred_lo, ls='--',color='black')
ax2.plot(disc_range,ypred_hi, ls='--',color='black')
# Plot marginal relationship
ax2.plot(disc_range,ypred,color='b')
ax2.annotate('b', (0.,1),fontsize=20, fontweight='bold')
### ======================================= Day/Night - c =================================== ###
# Posterior density for median detectability
g0 = np.array([np.median(x) for x in M.a0.trace()])
# Posterior density for length effect at average disc with
a1Lmu = np.median(M.a1.trace())*np.median(M.Lmu.trace())
# Posterior density for night
a2 = M.a2.trace()
# Density of daytime observations
day_dens = pm.invlogit(g0+a1Lmu)
# Density of night observations
night_dens = pm.invlogit(g0+a1Lmu+a2)
# Plot day posterior density
sns.distplot(day_dens, hist=False, kde_kws={"shade": True},ax=ax3)
ax3.text(0.83,5.1,'Day', fontsize=15)
# Plot night posterior density
sns.distplot(night_dens, hist=False, kde_kws={"shade": True},ax=ax3)
ax3.text(0.61,2.3,'Night', fontsize=15)
# Pretty it up
ax3.set(xlabel='P(detection)',ylabel='Posterior density')
ax3.set_xlim(0,1)
ax3.annotate('c', (0.,6),fontsize=20, fontweight='bold')
### ======================================= Location - d =================================== ###
# Posterior density for each site
a0 = M.a0.trace()
# Posterior density for length effect at average disc with
a1Lmu = np.median(M.a1.trace())*np.median(M.Lmu.trace())
# Posterior site-level average detectability
site_detect = pm.invlogit(a0+a1Lmu).T
colours = sns.color_palette("muted", nrs)
# Plot posterior density for each site
pal = sns.color_palette("hls", nrs)
for data,k,c in zip(site_detect, ReefSite, pal):
sns.kdeplot(data, color=c, label=k, shade=True,ax=ax4)
ax4.legend(loc='upper left')
# Pretty it up
ax4.set_xlim(0,1)
ax4.set(xlabel='P(detection)',ylabel='Posterior density')
ax4.annotate('d', (0.,12),fontsize=20, fontweight='bold')
fig.savefig('Figure_2.pdf')
###Output
-c:78: VisibleDeprecationWarning: boolean index did not match indexed array along dimension 0; dimension is 116 but corresponding boolean dimension is 93
###Markdown
Figure 3
###Code
# Set up plot
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12,6))
ax1,ax2 = axes.flat[0], axes.flat[1]
plt.rcParams.update({'font.size': 12})
plt.style.use('bmh')
# Individual points and colors
promarker = np.array(["o","o","s","s","^","^","<","<"])
procolour = pal
proedcol = np.array(["black","black","black","black","black","black","black","black"])
### ======================================= AMPTO calibration - a =================================== ###
hascale = 10000
# Estimated true abundance
tabund = np.array([np.median(x) for x in M.mr_abund.trace().T])
tdens = tabund/(50*5.)
# Expected CPUE
ypred = M.E_Ci.trace().T
ypred_mu = np.exp(np.array([np.median(y) for y in ypred]))
# CPUE credible intervals
ypred_lo = np.exp(np.array([np.percentile(y,2.5) for y in ypred]))
ypred_hi = np.exp(np.array([np.percentile(y,97.5) for y in ypred]))
# CPUE prediction intervals
ypred2 = M.P_Ci.trace().T
ypred_l = np.exp(np.array([np.percentile(y,2.5) for y in ypred2]))
ypred_h = np.exp(np.array([np.percentile(y,97.5) for y in ypred2]))
# Plot data
[ax1.scatter(tdens[i]*hascale,cpue[i],c=procolour[i],linewidths=.5,s=120,marker=promarker[i],
edgecolor=proedcol[i]) for i in range(len(cpue))]
ax1.set_ylim(0,3.5)
# Plot expected value
ax1.plot(ampto_predxD*hascale,ypred_mu)
# Plot credible intervals
ax1.plot(ampto_predxD*hascale,ypred_lo,ls=':',color='black')
ax1.plot(ampto_predxD*hascale,ypred_hi,ls=':',color='black')
# Axis labels
ax1.set(xlabel='CoTS density (ha)',ylabel='CPUE (CoTS/min)')
ax1.annotate('a', (0.,3.5),fontsize=20, fontweight='bold')
ax1.set_xlim(0,0.14*hascale)
### ======================================= FMP calibration - b =================================== ###
# Estimated detectability
s_detect = np.array([np.median(x) for x in M.q.trace().T])
s_detectl95 = np.array([np.percentile(x,2.5) for x in M.q.trace().T])
s_detectu95 = np.array([np.percentile(x,97.5) for x in M.q.trace().T])
s_detectl50 = np.array([np.percentile(x,25) for x in M.q.trace().T])
s_detectu50 = np.array([np.percentile(x,75) for x in M.q.trace().T])
# Site-level detectability
s_detect = np.array([np.median(x) for x in M.q.trace().T])
# 1:1 line
ax2.plot((0,1*hascale),(0,1*hascale),c="black")
# Plot data
for i in range(nrs):
ax2.plot((tdens[i]*hascale,tdens[i]*hascale),(fmp_density[i]*hascale/s_detectu95[i],
fmp_density[i]*hascale/s_detectl95[i]), c='#101010',linewidth=1)
ax2.plot((tdens[i]*hascale,tdens[i]*hascale),(fmp_density[i]*hascale/s_detectu50[i],
fmp_density[i]*hascale/s_detectl50[i]), c='#101010',linewidth=3.5)
ax2.scatter(tdens[i]*hascale, fmp_density[i]*hascale/s_detect[i], c=procolour[i],
linewidths=.5,s=120,marker=promarker[i], edgecolor=proedcol[i],zorder=3)
ax2.set_xlim(0,0.14*hascale)
ax2.set_ylim(0,0.14*hascale)
# Axis labels
ax2.set(xlabel='CoTS density (ha)',ylabel='Calibrated CoTS density (ha)')
ax2.annotate('b', (0.,1400),fontsize=20, fontweight='bold')
leg = ax2.legend(ReefSite, loc='lower right')
for col,lin,mark in zip(procolour,leg.get_lines(),promarker):
lin.set_color(col)
lin.set_marker(mark)
lin.set_linestyle("")
lin.set_markersize(10)
lin.set_markevery(2)
fig.savefig('Figure_3.pdf')
###Output
_____no_output_____ |
Arquivos ipynb/Base_Modelo.ipynb | ###Markdown
Template

The goal here is to develop a standard to be adopted for all analyses:

1. Import libraries
2. Import the data
3. Define the questions to be answered
4. Summarise the data
   - Preliminary review of which data are available
   - Check null values and data types, and make the necessary changes - [reference](http://fcpython.com/data-analysis/dealing-with-missing-data)
   - Remove data that will not be used
5. Separate out the data that answers the questions raised earlier
6. Present the results (e.g. charts)
7. Review the content
   - styling of the dataframes
   - adding images, videos, etc.
   - review the descriptive text

1 - Importing libraries

The main libraries are listed below.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
###Output
_____no_output_____ |
asr_lab5_solutions.ipynb | ###Markdown
Lab 5 - WFST operations

So far we've used WFSTs mainly as a useful structure for encoding and traversing HMMs. In this lab we'll move away from HMM acoustic modelling and look at how WFST operations can be used to avoid the need for specialised algorithms in speech and language processing. It is intended to give you insight into how these operations are used to construct HMMs encapsulating language model, pronunciation and acoustic modelling assumptions – the so-called "HCLG" WFST.

This lab will focus on the lexicon transducer, $L$, and grammar transducer, $G$.

We'll use some of the following operations, defined by Openfst:

* `fst.determinize(f)` creates a determinized version of `f`
* `fst.compose(f1,f2)` composes FSTs `f1` and `f2`
* `fst.shortestpath(f)` returns the shortest path (in terms of weight) through `f` from the start to a final state
* `f.minimize()` creates a minimized version of `f`
* `f.project(project_output=False)` for every arc in `f`, copies the input label to the output label (or vice versa, if `project_output=True`)
* `f.rmepsilon()` removes epsilon transitions – those arcs where both input and output labels are empty

For efficiency, the composition of `f1` and `f2` requires either the output arcs of `f1` or the input arcs of `f2` to be sorted prior to `compose()` being called. You can do this by calling `f1.arcsort(sort_type='olabel')` or `f2.arcsort(sort_type='ilabel')`.

The functions above assume that `openfst_python` has been imported as `fst`. Note that the first three functions above return a new WFST; the others modify the WFST *in place*, meaning that the original WFST is modified directly.

For convenience, we've provided a python module `helper_functions` that provides the `parse_lexicon()` and `generate_symbol_tables()` from the [Lab 1 solutions](https://github.com/Ore-an/asr_lab1/blob/master/asr_lab1_solutions.ipynb). And here is a function to generate an $L$ transducer:
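As a minimal, self-contained sketch of the compose/arcsort pattern described above (the toy transducers `A` and `B` and their bare integer labels are invented purely for illustration; they are not the lexicon built below):

```python
import openfst_python as fst

# Toy transducer A: reads label 1 and writes label 2 on a single arc.
A = fst.Fst()
a0, a1 = A.add_state(), A.add_state()
A.set_start(a0)
A.add_arc(a0, fst.Arc(1, 2, None, a1))
A.set_final(a1)

# Toy transducer B: reads label 2 and writes label 3.
B = fst.Fst()
b0, b1 = B.add_state(), B.add_state()
B.set_start(b0)
B.add_arc(b0, fst.Arc(2, 3, None, b1))
B.set_final(b1)

# compose() needs A's output arcs (or B's input arcs) sorted first.
A.arcsort(sort_type='olabel')
C = fst.compose(A, B)        # new WFST mapping 1 -> 3; A and B are unchanged
best = fst.shortestpath(C)   # lowest-weight path (here, the only path)
```

The exercises below follow exactly this pattern, but with the phone and word symbol tables attached.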
###Code
import openfst_python as fst
from helper_functions import parse_lexicon, generate_symbol_tables
lex = parse_lexicon('lexicon.txt')
word_table, phone_table, state_table = generate_symbol_tables(lex) # we won't use state_table in this lab
def generate_L_wfst(lex):
""" Express the lexicon in WFST form
Args:
lexicon (dict): lexicon to use, created from the parse_lexicon() function
Returns:
the constructed lexicon WFST
"""
L = fst.Fst()
# create a single start state
start_state = L.add_state()
L.set_start(start_state)
for (word, pron) in lex.items():
current_state = start_state
for (i,phone) in enumerate(pron):
next_state = L.add_state()
if i == len(pron)-1:
# add word output symbol on the final arc
L.add_arc(current_state, fst.Arc(phone_table.find(phone), \
word_table.find(word), None, next_state))
else:
L.add_arc(current_state, fst.Arc(phone_table.find(phone),0, None, next_state))
current_state = next_state
L.set_final(current_state)
L.set_input_symbols(phone_table)
L.set_output_symbols(word_table)
return L
L = generate_L_wfst(lex)
L.arcsort()
###Output
_____no_output_____
###Markdown
For the exercises, here are two functions to generate linear WFSTs for an arbitrary sequence of phones or words. (Yes, they are really just variants of the same function!)
###Code
def generate_linear_phone_wfst(phone_list):
P = fst.Fst()
current_state = P.add_state()
P.set_start(current_state)
for p in phone_list:
next_state = P.add_state()
P.add_arc(current_state, fst.Arc(phone_table.find(p), phone_table.find(p), None, next_state))
current_state = next_state
P.set_final(current_state)
P.set_input_symbols(phone_table)
P.set_output_symbols(phone_table)
return P
def generate_linear_word_wfst(word_list):
W = fst.Fst()
current_state = W.add_state()
W.set_start(current_state)
for w in word_list:
next_state = W.add_state()
W.add_arc(current_state, fst.Arc(word_table.find(w), word_table.find(w), None, next_state))
current_state = next_state
W.set_final(current_state)
W.set_input_symbols(word_table)
W.set_output_symbols(word_table)
return W
###Output
_____no_output_____
###Markdown
Exercises

1. Suppose you are given a sequence of phones, in the form `['p','ih','k','t']`, and the $L$ transducer created above. Write a function that returns the matching word from the lexicon for any given phone sequence, or else `None` if no matching word is found. Write two functions:
 1. One that works for $L$ as provided by the code above
 2. One that works only on a determinized version of $L$ – and test it on the output of `fst.determinize(L)`

 This should enable you to see why determinization is a very useful WFST operation!
###Code
import math
def transduce_sequence_nondet(f, in_seq):
"""Return transduced sequence given input sequence and non determinized FST
Args:
f (fst.Fst()): a non determinized FST
in_seq (list[str]): the sequence of strings to transduce
Returns:
out_seq (list[str]): the sequence of transduced symbols
"""
seq_len = len(in_seq)
in_seq.append('<EOS>') # adding a padding symbol at the end for possible final eps traversal
eps = f.input_symbols().find('<eps>')
queue = [(f.start(), 0, [])] # the tuple is (state, index in input sequence, output)
while queue:
curr_state, i, output = queue.pop(0) # pop first element in list
if i <= seq_len: # <= because we could traverse epsilons even when the input sequence ended
label = f.input_symbols().find(in_seq[i]) # transform label into index in table
for arc in f.arcs(curr_state):
if arc.ilabel == label:
new_output = output + [arc.olabel]
queue.append((arc.nextstate, i+1, new_output))
elif arc.ilabel == eps and arc.nextstate != curr_state:
new_output = output + [arc.olabel]
queue.append((arc.nextstate, i, new_output)) # we're not advancing in the input sequence because it's epsilon
if i == seq_len:
final_weight = float(f.final(curr_state))
if final_weight != math.inf: # if this is a final state
out_seq = [f.output_symbols().find(w) for w in output if w != eps] # find the labels in the table, remove epsilons
return out_seq
print("Can't transduce the sequence with provided FST") # return exits the function, so this is printed only when the stack is empty and we didn't find a path
seq = ['p','ih','k','t']
print(transduce_sequence_nondet(L, seq))
def transduce_sequence_det(f, seq):
"""Return transduced sequence given input sequence and determinized FST
Args:
f (fst.Fst()): a determinized FST
in_seq (list[str]): the sequence of strings to transduce
Returns:
out_seq (list[str]): the sequence of transduced symbols
"""
seq_len = len(seq)
eps = f.input_symbols().find('<eps>')
curr_state = f.start()
output = []
for i in range(seq_len):
found = False
label = f.input_symbols().find(seq[i])
for arc in f.arcs(curr_state):
if arc.ilabel == label:
output += [arc.olabel]
curr_state = arc.nextstate
found = True
break # no need to keep going through other arcs, as it's determinized
if not found:
print("Can't transduce the sequence with provided FST")
final_weight = float(f.final(curr_state))
if final_weight != math.inf: # if this is a final state
out_seq = [f.output_symbols().find(w) for w in output if w != eps] # find the labels in the table, remove epsilons
return out_seq
else:
print("Can't transduce the sequence with provided FST")
seq = ['p','ih','k','t']
Ldet = fst.determinize(L)
print(transduce_sequence_det(Ldet, seq))
###Output
['picked']
###Markdown
2. WFST composition allows you to achieve the same result much more easily. Create a linear WFST, $P$, corresponding to a string of phones, and compute $P \circ L$. Then use the projection and epsilon removal operations to display just the matching word.
###Code
seq = ['p','ih','k','t']
P = generate_linear_phone_wfst(seq)
P.arcsort(sort_type='ilabel')
comp = fst.compose(P, L)
comp.project(project_output=True).rmepsilon()
###Output
_____no_output_____
###Markdown
3. Modify your lexicon WFST slightly to allow a list of phones to be "decoded" to a sequence of multiple words from the lexicon, using composition. Try it with `['p','eh','k','ah','v','p','iy','t','er']`.
###Code
# this modified the Lexicon WFST directly - you could have done it more simply by adding
# extra code to the generate_L_wfst() function above
start_state = L.start()
for state in L.states():
if float(L.final(state)) != math.inf:
L.add_arc(state, fst.Arc(0, 0, None, start_state)) # add arc to start
Ldet = fst.determinize(L)
seq = ['p','eh','k','ah','v','p','iy','t','er']
P = generate_linear_phone_wfst(seq)
P.arcsort(sort_type='ilabel')
comp = fst.compose(P, L)
comp.project(project_output=True).rmepsilon()
###Output
_____no_output_____
###Markdown
4. Now solve the reverse problem: create a word-sequence WFST, $W$, and use composition to expand it into a sequence of phones.
###Code
seq = ['peck', 'of', 'peter']
W = generate_linear_word_wfst(seq)
W.arcsort(sort_type='olabel')
comp = fst.compose(L, W)
comp.project().rmepsilon()
###Output
_____no_output_____
###Markdown
5. Another advantage of using WFST composition to solve these kinds of problems is that it is easy to encode uncertainty in the input (a bit like in real ASR). For example, consider this WFST, in which the multiple arcs denote alternative phone transcriptions from the acoustic model:
###Code
def create_alt_phone_wfst(phone_alternatives):
P = fst.Fst()
current_state = P.add_state()
P.set_start(current_state)
for alt in phone_alternatives:
next_state = P.add_state()
for p in alt:
if p=='*':
P.set_final(current_state)
else:
P.add_arc(current_state, fst.Arc(phone_table.find(p), phone_table.find(p), None, next_state))
current_state = next_state
P.set_final(current_state)
P.set_input_symbols(phone_table)
P.set_output_symbols(phone_table)
return P
altP = create_alt_phone_wfst([['p'],['ay'],['p'],['er'],['p'],['eh','ih'],['k'],['t','<eps>'],['ah','<eps>'],['l','v','*'],['d','*']])
altP
###Output
_____no_output_____
###Markdown
Again, perform composition with your $L$ from Question 3, and observe the result. (Notice particularly what happens to the `<eps>` transitions during composition.)
###Code
altP.arcsort()
comp = fst.compose(altP, L)
comp
uncertainP = comp.project(True).rmepsilon()
uncertainP
###Output
_____no_output_____
###Markdown
6. We could have added weights to the arcs of the WFST above to describe the probability of the phone alternatives given by the acoustic model – this would have enabled you to find the most likely sequence of words. Without this information, let's instead use a $G$ WFST to find the most likely sequence. Let's assume that a word sequence taken from the passage "peter piper picked a peck of pickled peppers" is most likely. Design a $G$ WFST that accepts any sequence of words from the lexicon, but adds a cost of 1.0 to any word transition not in the passage. Given $G$, use composition to recover the most likely word sequence from the uncertain $P$. **Note on this solution** Peter and Andrea independently came up with solutions to this question. It is well worth looking at both!
###Code
def generate_G_wfst_peter(wseq):
G = fst.Fst()
start_state = G.add_state()
G.set_start(start_state)
prev_state = None
for w in wseq.split():
current_state = G.add_state()
# add transition from the start with cost 1
G.add_arc(start_state, fst.Arc(word_table.find(w), word_table.find(w), 1.0, current_state))
# arc from previous word with cost of zero
if prev_state:
G.add_arc(prev_state, fst.Arc(word_table.find(w), word_table.find(w), 0, current_state))
# <eps> transition back to the start
G.add_arc(current_state, fst.Arc(0, 0, 0, start_state))
prev_state = current_state
G.set_final(start_state)
G.set_input_symbols(word_table)
G.set_output_symbols(word_table)
return G
string = "peter piper picked a peck of pickled peppers"
G = generate_G_wfst_peter(string)
G
def generate_G_wfst_andrea(wseq):
G = fst.Fst()
word2state = {} # we will map each word in the lexicon to a state
state2word = {} # and vice-versa; note this is different from the symbol table mapping
# create a single start state
start_state = G.add_state()
G.set_start(start_state)
word2state['<s>'] = start_state # we're using a start-of-sentence token to not overload epsilon
state2word[start_state] = '<eps>' # epsilon is fine when we're looking up the state to put a label on the arc
wseq = '<s> ' + wseq
for word in lex.keys():
idx = G.add_state()
word2state[word] = idx
state2word[idx] = word
bigrams = [w for w in zip(wseq.split(" ")[:-1], wseq.split(" ")[1:])] # zipping together the list with itself with offset 1
passage_state_trans = [(word2state[x], word2state[y]) for x,y in bigrams] # state indexes for transitions existing in the passage, the ones we don't have to penalize
for state1 in G.states():
if state1 != start_state:
G.set_final(state1)
for state2 in G.states():
if (state1, state2) in passage_state_trans:
weight = 0
else:
weight = 1.0
word = state2word[state2]
label = word_table.find(word)
G.add_arc(state1, fst.Arc(label, label, weight, state2))
G.set_input_symbols(word_table)
G.set_output_symbols(word_table)
return G
string = "peter piper picked a peck of pickled peppers"
G2 = generate_G_wfst_andrea(string)
G2 = fst.determinize(G2)
G2
# Composition with Peter's G wfst
G.arcsort(sort_type='olabel')
comp = fst.compose(uncertainP, G)
comp.rmepsilon()
# Note that by default when the weight is 0 it's not printed out in the graph
# so the arcs without a printed weight (i.e. weight 0) are the most probable, since weights are negative log probabilities and lower is better
# Composition with Andrea's G wfst - notice how the results are the same
# even thought the G wfsts are very different.
G2.arcsort(sort_type='olabel')
comp2 = fst.compose(uncertainP, G2)
comp2.rmepsilon()
fst.shortestpath(comp)
fst.shortestpath(comp2)
###Output
_____no_output_____
###Markdown
If you have more time: Use WFST composition to implement a "predictive text"-style algorithm that, given a partial phone sequence such as `['p']` or `['p','ih']`, returns a WFST giving all matching words. You'll need to make some special modifications to $P$ or $L$, or both. On a determinized $L$ transducer this is a highly efficient way of solving this problem.
###Code
# There are very many ways this problem can be solved. Our skeleton code adds extra arcs with a
# special <rho> symbol. This symbol will represent unterminated sequences, and will be transduced
# to words to be output at every intermediate state.
# The same <rho> symbol is added at the end of the partial pronunciation.
# In a real application, the lexicon would be determinised *before* the <rho> arcs are added
def generate_predictive_L_wfst(lex):
""" express the lexicon in WFST form s.t. composition with partial sequence gives matching words
Args:
lexicon (dict): lexicon to use, created from the parse_lexicon() function
Returns:
the constructed WFST
"""
Lpred = fst.Fst()
rho = phone_table.add_symbol('<rho>')
# create a single start state
start_state = Lpred.add_state()
Lpred.set_start(start_state)
for (word, pron) in lex.items():
state_list = []
current_state = start_state
for (i,phone) in enumerate(pron):
next_state = Lpred.add_state()
state_list.append(next_state)
if i == len(pron)-1:
# add word output symbol on the final arc
Lpred.add_arc(current_state, fst.Arc(phone_table.find(phone), \
word_table.find(word), None, next_state))
else:
Lpred.add_arc(current_state, fst.Arc(phone_table.find(phone), 0, None, next_state))
current_state = next_state
Lpred.set_final(current_state)
for state in state_list:
if state != current_state:
Lpred.add_arc(state, fst.Arc(phone_table.find('<rho>'), word_table.find(word), None, current_state))
else:
Lpred.add_arc(state, fst.Arc(phone_table.find('<rho>'), 0, None, current_state)) # in the final state the word was already output, so no word on output
Lpred.set_input_symbols(phone_table)
Lpred.set_output_symbols(word_table)
return Lpred
Lpred = generate_predictive_L_wfst(lex)
Lpred.arcsort()
seq = ['p']
seq += ['<rho>']
Ppred = generate_linear_phone_wfst(seq)
Ppred.arcsort(sort_type='ilabel')
comp = fst.compose(Ppred, Lpred)
comp.project(True).rmepsilon()
###Output
_____no_output_____ |
01-intro-101/python/practices/03-bus/your-solution-here/03b_manipulacion-datos-python.ipynb | ###Markdown
Introduction

`Pandas` is a `Python` package that makes data manipulation and analysis easier. It provides fast, flexible data structures designed for working with relational or labelled data in an intuitive way.

`Pandas` lets us work with different kinds of data:

- Tabular data with heterogeneous columns, such as `Excel` files, `CSV` files or `SQL` tables
- Time series, ordered or not
- Matrices
- Statistical and observational data of all kinds
###Code
import pandas as pd
###Output
_____no_output_____
###Markdown
Data structures

The two data structures that `Pandas` offers are the `Series` and the `DataFrame`.

Series

A `Series` is a one-dimensional `array` that can hold data of any type and has an `index`. In this example we see how to create a `Series` where the `index` is the year and the values are the amount of $CO_2$ in the atmosphere, measured in parts per million.
###Code
carbon_dioxide_ppm = pd.Series(
[295, 297, 299, 302, 305, 309, 314, 322, 335, 351, 373, 403],
index = [1900, 1910, 1920, 1930, 1940, 1950, 1960, 1970, 1980, 1990, 2000, 2010]
)
carbon_dioxide_ppm
###Output
_____no_output_____
###Markdown
DataFrame

A `DataFrame` is a two-dimensional `array` or matrix, indexed by rows and by columns, which can also hold data of any type. For example, we can create a `DataFrame` with the frequencies of the most popular baby names in Catalonia during 2016, [according to Idescat](https://www.idescat.cat/nadons/).
###Code
onomastica = {
'Noms': [
'Marc',
'Martina',
'Àlex/Álex',
'Júlia/Julia',
'Laia',
'Lucía',
'Maria/María',
'Jan',
'Martí',
'Hugo'
],
'Sexe': ['H', 'D', 'H', 'D', 'D', 'D', 'D', 'H', 'H', 'H'],
'2016': [832, 702, 656, 649, 582, 573, 566, 562, 557, 553]
}
pd.DataFrame(onomastica, columns = ['Noms', 'Sexe', '2016'])
###Output
_____no_output_____
###Markdown
Column selection

The `DataFrame` we will use in this example comes from the dataset of the [Titanic: Machine Learning from Disaster](https://www.kaggle.com/c/titanic) competition on [Kaggle](https://www.kaggle.com/).
###Code
df = pd.read_csv('data/titanic.csv')
df.head()
###Output
_____no_output_____
###Markdown
We can select a particular column of a `DataFrame`, for example the _Name_ column, interchangeably with `df.Name` or `df['Name']`.
###Code
df.Name.head()
df.Name.equals(df['Name'])
###Output
_____no_output_____
###Markdown
Each column of our `DataFrame` is a `Series` object.
###Code
type(df.Name)
###Output
_____no_output_____
###Markdown
To select more than one column, we use an `array` with the column names.
###Code
df[['Name', 'Sex']].head()
###Output
_____no_output_____
###Markdown
Row filtering

`Pandas` offers several ways to filter the data in a `DataFrame`.

Filtering rows based on conditions

In many cases we will want to select a subset of rows that satisfy some condition. For example, we can select the passengers who survived the Titanic tragedy.
###Code
survivors = df[df.Survived == 1]
survivors.head()
###Output
_____no_output_____
###Markdown
The condition can be as complex as we like. For example, below we select the passengers under 21 years of age who survived and were assigned to a cabin.
###Code
survivors = df[(df.Survived == 1) & (df.Age < 21) & ~(pd.isna(df.Cabin))]
survivors.head()
###Output
_____no_output_____
###Markdown
Filtering rows with the map function

To build more complex filters, the [`map`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.map.html) function is often very useful. The [`map`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.map.html) function lets us apply a function to every value of a `Series` object. Remember that each column of our `DataFrame` is itself an object of type `Series`. In the following example, we apply a function to each element of the `Name` column that tells us whether it contains the text _Mr._.
###Code
df[df.Name.map(lambda name: 'Mr.' in name)].head()
###Output
_____no_output_____
###Markdown
Splitting the data into train and test sets

A very common situation in _machine learning_ problems is having to split our _dataset_ into two pieces or _splits_: the _train_ _dataset_ and the _test_ _dataset_. The _train_ _dataset_ is used to train the model with whatever _machine learning_ methods we choose, and the _test_ _dataset_ is used to evaluate the model by comparing the model's predictions on this held-out data.

In the most common case, where our data are not a time series, we split the data into two random _splits_. Below we will look at two of the many ways to do this.

The sample function

The `pandas` [`sample`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sample.html) function returns a random sample of our _dataset_. In the following example, we first create the _train_ _dataset_, specifying the fraction of the original _dataset_ that we want in the `frac` parameter, and then assign the remaining, unselected rows to the _test_ _dataset_.
###Code
train = df.sample(frac=0.8, random_state=200)
test = df.drop(train.index)
train.head()
###Output
_____no_output_____
###Markdown
Scikit-learn

The second option is to use the `Scikit-learn` library, which provides simple and efficient tools for data mining and data analysis and is one of the most widely used libraries in _machine learning_. The [`train_test_split`](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function splits our data into _train_ and _test_ _datasets_, with the proportion of the _test_ _dataset_ specified in the `test_size` parameter. This second method also lets us state explicitly which variable will be the dependent variable, our _y_ or _target_ in a later analysis, in our case the `Survived` column.
###Code
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# Create the features matrix
X = df.drop('Survived', axis=1)
# Create the target vector
y = df.Survived
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3)
X_train.head()
y_train.head()
###Output
_____no_output_____ |
triple_agent/reports/external_reports/event_reports/Summer Cup 2019 Secret Awards.ipynb | ###Markdown
Gold Spot Awards: Groups Only 🏆 **The Rush Award** for fastest completion of missions
###Code
fastest_time = None
fastest_game = None
def rush_award(games, data_dictionary):
global fastest_time
global fastest_game
for game in games:
if game.win_type == WinType.MissionsWin:
for event in game.timeline:
if event.category & TimelineCategory.GameEnd:
this_duration = event.elapsed_time
data_exists = data_dictionary.get(game.spy, None)
if data_exists is None:
data_dictionary[game.spy] = this_duration
else:
data_dictionary[game.spy] = min(data_dictionary[game.spy], this_duration)
if fastest_time is None or this_duration < fastest_time:
fastest_time = this_duration
fastest_game = game
query(all_replays, DataQueryProperties(query_function=rush_award, primary_order=sum, limit=15), AxisProperties(title="Rush Award",force_bar=True, data_label_style=PlotLabelStyle.Plain))
print(f'{fastest_game.spy} completed missions on {fastest_game.venue} against {fastest_game.sniper} in {fastest_time:0.1f} seconds')
###Output
_____no_output_____
###Markdown
🏆 **The Assassin Award** for fastest termination of the spy
###Code
fastest_time = None
fastest_game = None
def assassin_award(games, data_dictionary):
global fastest_time
global fastest_game
for game in games:
if game.win_type == WinType.SpyShot:
for event in game.timeline:
if event.category & TimelineCategory.GameEnd:
this_duration = event.elapsed_time
data_exists = data_dictionary.get(game.sniper, None)
if data_exists is None:
data_dictionary[game.sniper] = this_duration
else:
data_dictionary[game.sniper] = min(data_dictionary[game.sniper], this_duration)
if fastest_time is None or this_duration < fastest_time:
fastest_time = this_duration
fastest_game = game
query(all_replays, DataQueryProperties(query_function=assassin_award, primary_order=sum, limit=15), AxisProperties(title="Assassin Award",force_bar=True, data_label_style=PlotLabelStyle.Plain))
print(f'{fastest_game.sniper} shot {fastest_game.spy} on {fastest_game.venue} in {fastest_time:0.1f} seconds')
###Output
_____no_output_____
###Markdown
🏆 **The Invisible Man Award** for most lowlights achieved as spy *Spy doesn't have to win but must be lowlit and alive at the conclusion of the game*
###Code
def invisible_man_award(games, data_dictionary):
for game in games:
if game.win_type != WinType.SpyShot:
last_light = None
for event in game.timeline:
if event.category & TimelineCategory.SniperLights and Roles.Spy in event.role:
if 'less' in event.event:
last_light = 'low'
elif 'neutral' in event.event:
last_light = 'neutral'
else:
last_light = 'high'
if last_light == 'low':
data_dictionary[game.spy] += 1
query(
all_replays,
DataQueryProperties(query_function=invisible_man_award, primary_order=sum, reverse_primary_order=True, limit=15),
AxisProperties(title="Invisible Man Award",force_bar=True, data_label_style=PlotLabelStyle.Plain)
)
###Output
_____no_output_____
###Markdown
Tin Spot Awards: Groups Only 🏆 **The Down to the Wire Award** for longest OT spy win
###Code
longest_ot = None
longest_ot_game = None
def down_to_the_wire_award(games, data_dictionary):
global longest_ot
global longest_ot_game
for game in games:
if game.win_type & WinType.SpyWin:
overtime_start = None
for event in game.timeline:
if event.category & TimelineCategory.Overtime:
overtime_start = event.elapsed_time
if event.category & TimelineCategory.GameEnd and overtime_start is not None:
total_ot_time = event.elapsed_time - overtime_start
if longest_ot is None or total_ot_time > longest_ot:
longest_ot = total_ot_time
longest_ot_game = game
data_exists = data_dictionary.get(game.spy, None)
if data_exists is None:
data_dictionary[game.spy] = total_ot_time
else:
data_dictionary[game.spy] = max(data_dictionary[game.spy], total_ot_time)
query(
all_replays,
DataQueryProperties(query_function=down_to_the_wire_award, primary_order=sum, reverse_primary_order=True, limit=15),
AxisProperties(title="Down To The Wire Award",force_bar=True, data_label_style=PlotLabelStyle.Plain)
)
print(f'{longest_ot_game.spy} survived overtime for {longest_ot:0.1f} seconds on {longest_ot_game.venue} against {longest_ot_game.sniper}')
###Output
_____no_output_____
###Markdown
🏆 **The Friendly Fire Award** for most civilian casualties
###Code
def friendly_fire_award(games, data_dictionary):
for game in games:
if game.win_type == WinType.CivilianShot:
data_dictionary[game.sniper] += 1
query(
all_replays,
DataQueryProperties(query_function=friendly_fire_award, primary_order=sum, reverse_primary_order=True, limit=15),
AxisProperties(title="Friendly Fire Award",force_bar=True, data_label_style=PlotLabelStyle.Plain)
)
###Output
_____no_output_____
###Markdown
🏆 **The Johnny English Award** for most blows of cover *Cover blows include coughing, statue clank and drink drops*
###Code
def johnny_english_award(games, data_dictionary):
for game in games:
for event in game.timeline:
if event.event in [
#clank
"dropped statue.",
#cough
"banana bread aborted.", "action test red: contact double agent",
#crash
"purloin guest list aborted."
]:
data_dictionary[game.spy] += 1
def johnny_english_award_v2(games, data_dictionary):
for game in games:
for event in game.timeline:
if event.event in [
#clank
"dropped statue.",
#cough
"banana bread aborted.", "action test red: contact double agent",
#crash
"purloin guest list aborted.",
#red watch check
"action test red: check watch", "aborted watch check to add time."
]:
data_dictionary[game.spy] += 1
query(
all_replays,
DataQueryProperties(query_function=johnny_english_award_v2, primary_order=sum, reverse_primary_order=True, limit=15),
AxisProperties(title="Johnny English Award",force_bar=True, data_label_style=PlotLabelStyle.Plain)
)
###Output
_____no_output_____ |
VQGAN_CLIP_Animation_demo.ipynb | ###Markdown
Paper: https://arxiv.org/abs/2012.09841
GitHub: https://github.com/chigozienri/VQGAN-CLIP-animations

Runtime setup: go to "Runtime" → "Change runtime type" and set "Hardware accelerator" to GPU.

How to run: select "Runtime" → "Run all".
###Code
!nvidia-smi
###Output
_____no_output_____
###Markdown
Mounting Google Drive
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
Creating the workspace
###Code
!mkdir '/content/drive/MyDrive/vqgan'
!mkdir '/content/drive/MyDrive/vqgan/images'
working_dir = '/content/drive/MyDrive/vqgan'
###Output
_____no_output_____
###Markdown
Installing the libraries
###Code
%cd /content/
print("Downloading CLIP...")
!git clone https://github.com/openai/CLIP &> /dev/null
print("Downloading Python AI libraries...")
!git clone https://github.com/CompVis/taming-transformers &> /dev/null
!pip install ftfy regex tqdm omegaconf pytorch-lightning &> /dev/null
!pip install kornia &> /dev/null
!pip install einops &> /dev/null
print("Installing libraries for handling metadata...")
!pip install stegano &> /dev/null
!apt install exempi &> /dev/null
!pip install python-xmp-toolkit &> /dev/null
!pip install imgtag &> /dev/null
!pip install pillow==7.1.2 &> /dev/null
print("Installing Python video creation libraries...")
!pip install imageio-ffmpeg &> /dev/null
path = f'{working_dir}/steps'
!mkdir --parents {path}
print("Installation finished.")
###Output
_____no_output_____
###Markdown
Importing the libraries
###Code
import argparse
import math
from pathlib import Path
import sys
import os
import cv2
import pandas as pd
import numpy as np
import subprocess
import ast
sys.path.append('/content/taming-transformers')
# Some models include transformers, others need explicit pip install
try:
import transformers
except Exception:
!pip install transformers
import transformers
from IPython import display
from base64 import b64encode
from omegaconf import OmegaConf
from PIL import Image
from taming.models import cond_transformer, vqgan
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
from tqdm.notebook import tqdm
from CLIP import clip
import kornia.augmentation as K
import numpy as np
import imageio
from PIL import ImageFile, Image
from imgtag import ImgTag # metadata
from libxmp import * # metadata
import libxmp # metadata
from stegano import lsb
import json
ImageFile.LOAD_TRUNCATED_IMAGES = True
###Output
_____no_output_____
###Markdown
Defining the utility functions
###Code
def sinc(x):
return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))
def lanczos(x, a):
cond = torch.logical_and(-a < x, x < a)
out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
return out / out.sum()
def ramp(ratio, width):
n = math.ceil(width / ratio + 1)
out = torch.empty([n])
cur = 0
for i in range(out.shape[0]):
out[i] = cur
cur += ratio
return torch.cat([-out[1:].flip([0]), out])[1:-1]
def resample(input, size, align_corners=True):
n, c, h, w = input.shape
dh, dw = size
input = input.view([n * c, 1, h, w])
if dh < h:
kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
pad_h = (kernel_h.shape[0] - 1) // 2
input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
input = F.conv2d(input, kernel_h[None, None, :, None])
if dw < w:
kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
pad_w = (kernel_w.shape[0] - 1) // 2
input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
input = F.conv2d(input, kernel_w[None, None, None, :])
input = input.view([n, c, h, w])
return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
class ReplaceGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, x_forward, x_backward):
ctx.shape = x_backward.shape
return x_forward
@staticmethod
def backward(ctx, grad_in):
return None, grad_in.sum_to_size(ctx.shape)
replace_grad = ReplaceGrad.apply
class ClampWithGrad(torch.autograd.Function):
@staticmethod
def forward(ctx, input, min, max):
ctx.min = min
ctx.max = max
ctx.save_for_backward(input)
return input.clamp(min, max)
@staticmethod
def backward(ctx, grad_in):
input, = ctx.saved_tensors
return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None
clamp_with_grad = ClampWithGrad.apply
def vector_quantize(x, codebook):
d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
indices = d.argmin(-1)
x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
return replace_grad(x_q, x)
class Prompt(nn.Module):
def __init__(self, embed, weight=1., stop=float('-inf')):
super().__init__()
self.register_buffer('embed', embed)
self.register_buffer('weight', torch.as_tensor(weight))
self.register_buffer('stop', torch.as_tensor(stop))
def forward(self, input):
input_normed = F.normalize(input.unsqueeze(1), dim=2)
embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
dists = dists * self.weight.sign()
return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()
def parse_prompt(prompt):
vals = prompt.rsplit(':', 2)
vals = vals + ['', '1', '-inf'][len(vals):]
return vals[0], float(vals[1]), float(vals[2])
class MakeCutouts(nn.Module):
def __init__(self, cut_size, cutn, cut_pow=1.):
super().__init__()
self.cut_size = cut_size
self.cutn = cutn
self.cut_pow = cut_pow
self.augs = nn.Sequential(
K.RandomHorizontalFlip(p=0.5),
# K.RandomSolarize(0.01, 0.01, p=0.7),
K.RandomSharpness(0.3,p=0.4),
K.RandomAffine(degrees=30, translate=0.1, p=0.8, padding_mode='border'),
K.RandomPerspective(0.2,p=0.4),
K.ColorJitter(hue=0.01, saturation=0.01, p=0.7))
self.noise_fac = 0.1
def forward(self, input):
sideY, sideX = input.shape[2:4]
max_size = min(sideX, sideY)
min_size = min(sideX, sideY, self.cut_size)
cutouts = []
for _ in range(self.cutn):
size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
offsetx = torch.randint(0, sideX - size + 1, ())
offsety = torch.randint(0, sideY - size + 1, ())
cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
batch = self.augs(torch.cat(cutouts, dim=0))
if self.noise_fac:
facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
batch = batch + facs * torch.randn_like(batch)
return batch
def load_vqgan_model(config_path, checkpoint_path):
config = OmegaConf.load(config_path)
if config.model.target == 'taming.models.vqgan.VQModel':
model = vqgan.VQModel(**config.model.params)
model.eval().requires_grad_(False)
model.init_from_ckpt(checkpoint_path)
elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
parent_model.eval().requires_grad_(False)
parent_model.init_from_ckpt(checkpoint_path)
model = parent_model.first_stage_model
else:
raise ValueError(f'unknown model type: {config.model.target}')
del model.loss
return model
def resize_image(image, out_size):
ratio = image.size[0] / image.size[1]
area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
size = round((area * ratio)**0.5), round((area / ratio)**0.5)
return image.resize(size, Image.LANCZOS)
###Output
_____no_output_____
###Markdown
Fetching the default images: download the initial_image and target_image used by default.
###Code
%cd /content/drive/MyDrive/vqgan/images
!wget https://www.pakutaso.com/shared/img/thumb/nantoshi21PAR519902088_TP_V4.jpg
!wget https://www.pakutaso.com/shared/img/thumb/yuka16011215IMG_5574_TP_V4.jpg
src_img = Image.open('/content/drive/MyDrive/vqgan/images/nantoshi21PAR519902088_TP_V4.jpg')
dst_img = Image.open('/content/drive/MyDrive/vqgan/images/yuka16011215IMG_5574_TP_V4.jpg')
src_img = src_img.resize((src_img.width // 2, src_img.height // 2))
dst_img = dst_img.resize((dst_img.width // 2, dst_img.height // 2))
src_img.save('/content/drive/MyDrive/vqgan/images/nantoshi21PAR519902088_TP_V4.jpg')
dst_img.save('/content/drive/MyDrive/vqgan/images/yuka16011215IMG_5574_TP_V4.jpg')
%cd /content/
###Output
_____no_output_____
###Markdown
Parameter settings

| Parameter | Usage |
|---|---|
| `key_frames` | Whether to change parameters during the run using key frames |
| `text_prompts` | Text prompts (e.g. 'Apple': {10: 1, 20: 0}, 'Orange': {10: 0, 20: 1}: Apple is at its maximum at frame 10 and minimum at frame 20) |
| `width` | Width of the output in pixels. This is rounded down to a multiple of 16 |
| `height` | Height of the output in pixels. This is rounded down to a multiple of 16 |
| `model` | Which model to use; it must be downloaded as above |
| `interval` | How often to display frames in the notebook (does not affect the actual output) |
| `initial_image` | Image to start from (relative path to the file) (e.g. ./content/src.jpg) |
| `target_images` | Image prompts towards the target (relative paths to the files) (e.g. './content/init.jpg': {0: 1, 10: 0}, './content/final.jpg': {0: 0, 10: 1}) |
| `seed` | Random seed. If set to a positive integer the run is repeatable (you get the same output for the same input every time); if set to -1, a random seed is used |
| `max_frames` | Number of frames in the animation |
| `angle` | Angle to rotate clockwise between frames, in degrees (e.g. 10: 0, 30: 1, 50: -1) |
| `zoom` | Factor to zoom in by each frame; 1 is no zoom, less than 1 zooms out, greater than 1 zooms in (positive values only) (e.g. 10: 1, 30: 1.2, 50: 0.9) |
| `translation_x` | Number of pixels to shift right each frame |
| `translation_y` | Number of pixels to shift down each frame |
| `iterations_per_frame` | Number of times to run the VQGAN + CLIP method for each frame |
| `save_all_iterations` | For debugging; set to False for normal operation |

---------

If you are creating an animation with many key frames, try this spreadsheet by @EphemeralInc to build the strings: https://docs.google.com/spreadsheets/d/1sJ0PMHUPIYkS7LSxhzTThEP7rZ5CFonz-dBxqe8F2uc. You can also try https://keyframe-string-generator.glitch.me/ or https://audio-keyframe-generator.glitch.me/ to build the strings with a visual editor or from an audio file.
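As a rough, illustrative sketch of how these key-frame strings are interpreted (this simply mirrors the `parse_key_frames`/`get_inbetweens` logic defined in the code cell below; the string, frame numbers and `max_frames` value here are made up for the sketch):

```python
import ast
import numpy as np
import pandas as pd

max_frames = 61                             # just for this sketch
zoom_string = "10: 1, 30: 1.2, 60: 0.9"     # same format as the zoom field

key_frame_values = ast.literal_eval('{' + zoom_string + '}')   # {10: 1, 30: 1.2, 60: 0.9}
series = pd.Series([np.nan] * max_frames)
for frame, value in key_frame_values.items():
    series[frame] = value                   # pin the value at each key frame
series = series.interpolate(limit_direction='both')  # linear interpolation between key frames
print(series[0], series[20], series[45])    # 1.0 (held before the first key frame), 1.1, 1.05
```

So every frame gets a value, either taken directly from a key frame or interpolated linearly between the two nearest key frames.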
###Code
key_frames = True #@param {type:"boolean"}
text_prompts = "'Moon': {10: 0, 60: 1}, 'Sun': {10: 1, 60: 0}" #@param {type:"string"}
width = 400 #@param {type:"number"}
height = 400 #@param {type:"number"}
model = "vqgan_imagenet_f16_16384" #@param ["vqgan_imagenet_f16_16384", "vqgan_imagenet_f16_1024", "wikiart_16384", "coco", "faceshq", "sflckr"]
interval = 1#@param {type:"number"}
initial_image = "/content/drive/MyDrive/vqgan/images/nantoshi21PAR519902088_TP_V4.jpg"#@param {type:"string"}
target_images = "'/content/drive/MyDrive/vqgan/images/yuka16011215IMG_5574_TP_V4.jpg': {10: 0, 60: 1}"#@param {type:"string"}
seed = 1#@param {type:"number"}
max_frames = 60#@param {type:"number"}
angle = "10: 0, 30: 1, 60: -1"#@param {type:"string"}
# @markdown <b>Careful:</b> do not use negative or 0 zoom. If you want to zoom out, use a number between 0 and 1.
zoom = "10: 1, 30: 1.2, 60: 0.9"#@param {type:"string"}
translation_x = "0: 0"#@param {type:"string"}
translation_y = "0: 0"#@param {type:"string"}
iterations_per_frame = "0: 10"#@param {type:"string"}
save_all_iterations = False#@param {type:"boolean"}
# option -C - skips download if already exists
!curl -C - -L -o {model}.yaml -C - 'https://heibox.uni-heidelberg.de/d/8088892a516d4e3baf92/files/?p=%2Fconfigs%2Fmodel.yaml&dl=1' #ImageNet 1024
!curl -C - -L -o {model}.ckpt -C - 'https://heibox.uni-heidelberg.de/d/8088892a516d4e3baf92/files/?p=%2Fckpts%2Flast.ckpt&dl=1' #ImageNet 1024
if initial_image != "":
print(
"WARNING: You have specified an initial image. Note that the image resolution "
"will be inherited from this image, not whatever width and height you specified. "
"If the initial image resolution is too high, this can result in out of memory errors."
)
elif width * height > 160000:
print(
"WARNING: The width and height you have specified may be too high, in which case "
"you will encounter out of memory errors either at the image generation stage or the "
"video synthesis stage. If so, try reducing the resolution"
)
model_names={
"vqgan_imagenet_f16_16384": 'ImageNet 16384',
"vqgan_imagenet_f16_1024":"ImageNet 1024",
"wikiart_1024":"WikiArt 1024",
"wikiart_16384":"WikiArt 16384",
"coco":"COCO-Stuff",
"faceshq":"FacesHQ",
"sflckr":"S-FLCKR"
}
model_name = model_names[model]
if seed == -1:
seed = None
def parse_key_frames(string, prompt_parser=None):
"""Given a string representing frame numbers paired with parameter values at that frame,
return a dictionary with the frame numbers as keys and the parameter values as the values.
Parameters
----------
string: string
Frame numbers paired with parameter values at that frame number, in the format
'framenumber1: (parametervalues1), framenumber2: (parametervalues2), ...'
prompt_parser: function or None, optional
If provided, prompt_parser will be applied to each string of parameter values.
Returns
-------
dict
Frame numbers as keys, parameter values at that frame number as values
Raises
------
RuntimeError
If the input string does not match the expected format.
Examples
--------
>>> parse_key_frames("10:(Apple: 1| Orange: 0), 20: (Apple: 0| Orange: 1| Peach: 1)")
{10: 'Apple: 1| Orange: 0', 20: 'Apple: 0| Orange: 1| Peach: 1'}
>>> parse_key_frames("10:(Apple: 1| Orange: 0), 20: (Apple: 0| Orange: 1| Peach: 1)", prompt_parser=lambda x: x.lower()))
{10: 'apple: 1| orange: 0', 20: 'apple: 0| orange: 1| peach: 1'}
"""
try:
# This is the preferred way, the regex way will eventually be deprecated.
frames = ast.literal_eval('{' + string + '}')
if isinstance(frames, set):
# If user forgot keyframes, just set value of frame 0
(frame,) = list(frames)
frames = {0: frame}
return frames
except Exception:
import re
pattern = r'((?P<frame>[0-9]+):[\s]*[\(](?P<param>[\S\s]*?)[\)])'
frames = dict()
for match_object in re.finditer(pattern, string):
frame = int(match_object.groupdict()['frame'])
param = match_object.groupdict()['param']
if prompt_parser:
frames[frame] = prompt_parser(param)
else:
frames[frame] = param
if frames == {} and len(string) != 0:
raise RuntimeError(f'Key Frame string not correctly formatted: {string}')
return frames
# Defaults, if left empty
if angle == "":
angle = "0"
if zoom == "":
zoom = "1"
if translation_x == "":
translation_x = "0"
if translation_y == "":
translation_y = "0"
if iterations_per_frame == "":
iterations_per_frame = "10"
if key_frames:
parameter_dicts = dict()
parameter_dicts['zoom'] = parse_key_frames(zoom, prompt_parser=float)
parameter_dicts['angle'] = parse_key_frames(angle, prompt_parser=float)
parameter_dicts['translation_x'] = parse_key_frames(translation_x, prompt_parser=float)
parameter_dicts['translation_y'] = parse_key_frames(translation_y, prompt_parser=float)
parameter_dicts['iterations_per_frame'] = parse_key_frames(iterations_per_frame, prompt_parser=int)
text_prompts_dict = parse_key_frames(text_prompts)
if all([isinstance(value, dict) for value in list(text_prompts_dict.values())]):
for key, value in list(text_prompts_dict.items()):
parameter_dicts[f'text_prompt: {key}'] = value
else:
# Old format
text_prompts_dict = parse_key_frames(text_prompts, prompt_parser=lambda x: x.split('|'))
for frame, prompt_list in text_prompts_dict.items():
for prompt in prompt_list:
prompt_key, prompt_value = prompt.split(":")
prompt_key = f'text_prompt: {prompt_key.strip()}'
prompt_value = prompt_value.strip()
if prompt_key not in parameter_dicts:
parameter_dicts[prompt_key] = dict()
parameter_dicts[prompt_key][frame] = prompt_value
image_prompts_dict = parse_key_frames(target_images)
if all([isinstance(value, dict) for value in list(image_prompts_dict.values())]):
for key, value in list(image_prompts_dict.items()):
parameter_dicts[f'image_prompt: {key}'] = value
else:
# Old format
image_prompts_dict = parse_key_frames(target_images, prompt_parser=lambda x: x.split('|'))
for frame, prompt_list in image_prompts_dict.items():
for prompt in prompt_list:
prompt_key, prompt_value = prompt.split(":")
prompt_key = f'image_prompt: {prompt_key.strip()}'
prompt_value = prompt_value.strip()
if prompt_key not in parameter_dicts:
parameter_dicts[prompt_key] = dict()
parameter_dicts[prompt_key][frame] = prompt_value
def add_inbetweens():
global text_prompts
global target_images
global zoom
global angle
global translation_x
global translation_y
global iterations_per_frame
global text_prompts_series
global target_images_series
global zoom_series
global angle_series
global translation_x_series
global translation_y_series
global iterations_per_frame_series
global model
global args
def get_inbetweens(key_frames_dict, integer=False):
"""Given a dict with frame numbers as keys and a parameter value as values,
return a pandas Series containing the value of the parameter at every frame from 0 to max_frames.
Any values not provided in the input dict are calculated by linear interpolation between
the values of the previous and next provided frames. If there is no previous provided frame, then
the value is equal to the value of the next provided frame, or if there is no next provided frame,
then the value is equal to the value of the previous provided frame. If no frames are provided,
all frame values are NaN.
Parameters
----------
key_frames_dict: dict
A dict with integer frame numbers as keys and numerical values of a particular parameter as values.
integer: Bool, optional
If True, the values of the output series are converted to integers.
Otherwise, the values are floats.
Returns
-------
pd.Series
A Series with length max_frames representing the parameter values for each frame.
Examples
--------
>>> max_frames = 5
>>> get_inbetweens({1: 5, 3: 6})
0 5.0
1 5.0
2 5.5
3 6.0
4 6.0
dtype: float64
>>> get_inbetweens({1: 5, 3: 6}, integer=True)
0 5
1 5
2 5
3 6
4 6
dtype: int64
"""
key_frame_series = pd.Series([np.nan for a in range(max_frames)])
for i, value in key_frames_dict.items():
key_frame_series[i] = value
key_frame_series = key_frame_series.astype(float)
key_frame_series = key_frame_series.interpolate(limit_direction='both')
if integer:
return key_frame_series.astype(int)
return key_frame_series
if key_frames:
text_prompts_series_dict = dict()
for parameter in parameter_dicts.keys():
if len(parameter_dicts[parameter]) > 0:
if parameter.startswith('text_prompt:'):
try:
text_prompts_series_dict[parameter] = get_inbetweens(parameter_dicts[parameter])
except RuntimeError as e:
raise RuntimeError(
"WARNING: You have selected to use key frames, but you have not "
"formatted `text_prompts` correctly for key frames.\n"
"Please read the instructions to find out how to use key frames "
"correctly.\n"
)
text_prompts_series = pd.Series([np.nan for a in range(max_frames)])
for i in range(max_frames):
combined_prompt = []
for parameter, value in text_prompts_series_dict.items():
parameter = parameter[len('text_prompt:'):].strip()
combined_prompt.append(f'{parameter}: {value[i]}')
text_prompts_series[i] = ' | '.join(combined_prompt)
image_prompts_series_dict = dict()
for parameter in parameter_dicts.keys():
if len(parameter_dicts[parameter]) > 0:
if parameter.startswith('image_prompt:'):
try:
image_prompts_series_dict[parameter] = get_inbetweens(parameter_dicts[parameter])
except RuntimeError as e:
raise RuntimeError(
"WARNING: You have selected to use key frames, but you have not "
"formatted `image_prompts` correctly for key frames.\n"
"Please read the instructions to find out how to use key frames "
"correctly.\n"
)
target_images_series = pd.Series([np.nan for a in range(max_frames)])
for i in range(max_frames):
combined_prompt = []
for parameter, value in image_prompts_series_dict.items():
parameter = parameter[len('image_prompt:'):].strip()
combined_prompt.append(f'{parameter}: {value[i]}')
target_images_series[i] = ' | '.join(combined_prompt)
try:
angle_series = get_inbetweens(parameter_dicts['angle'])
except RuntimeError as e:
print(
"WARNING: You have selected to use key frames, but you have not "
"formatted `angle` correctly for key frames.\n"
"Attempting to interpret `angle` as "
f'"0: ({angle})"\n'
"Please read the instructions to find out how to use key frames "
"correctly.\n"
)
angle = f"0: ({angle})"
angle_series = get_inbetweens(parse_key_frames(angle))
try:
zoom_series = get_inbetweens(parameter_dicts['zoom'])
except RuntimeError as e:
print(
"WARNING: You have selected to use key frames, but you have not "
"formatted `zoom` correctly for key frames.\n"
"Attempting to interpret `zoom` as "
f'"0: ({zoom})"\n'
"Please read the instructions to find out how to use key frames "
"correctly.\n"
)
zoom = f"0: ({zoom})"
zoom_series = get_inbetweens(parse_key_frames(zoom))
for i, zoom in enumerate(zoom_series):
if zoom <= 0:
print(
f"WARNING: You have selected a zoom of {zoom} at frame {i}. "
"This is meaningless. "
"If you want to zoom out, use a value between 0 and 1. "
"If you want no zoom, use a value of 1."
)
try:
translation_x_series = get_inbetweens(parameter_dicts['translation_x'])
except RuntimeError as e:
print(
"WARNING: You have selected to use key frames, but you have not "
"formatted `translation_x` correctly for key frames.\n"
"Attempting to interpret `translation_x` as "
f'"0: ({translation_x})"\n'
"Please read the instructions to find out how to use key frames "
"correctly.\n"
)
translation_x = f"0: ({translation_x})"
translation_x_series = get_inbetweens(parse_key_frames(translation_x))
try:
translation_y_series = get_inbetweens(parameter_dicts['translation_y'])
except RuntimeError as e:
print(
"WARNING: You have selected to use key frames, but you have not "
"formatted `translation_y` correctly for key frames.\n"
"Attempting to interpret `translation_y` as "
f'"0: ({translation_y})"\n'
"Please read the instructions to find out how to use key frames "
"correctly.\n"
)
translation_y = f"0: ({translation_y})"
translation_y_series = get_inbetweens(parse_key_frames(translation_y))
try:
iterations_per_frame_series = get_inbetweens(
parameter_dicts['iterations_per_frame'], integer=True
)
except RuntimeError as e:
print(
"WARNING: You have selected to use key frames, but you have not "
"formatted `iterations_per_frame` correctly for key frames.\n"
"Attempting to interpret `iterations_per_frame` as "
f'"0: ({iterations_per_frame})"\n'
"Please read the instructions to find out how to use key frames "
"correctly.\n"
)
iterations_per_frame = f"0: ({iterations_per_frame})"
iterations_per_frame_series = get_inbetweens(
parse_key_frames(iterations_per_frame), integer=True
)
else:
text_prompts = [phrase.strip() for phrase in text_prompts.split("|")]
if text_prompts == ['']:
text_prompts = []
if target_images == "None" or not target_images:
target_images = []
else:
target_images = target_images.split("|")
target_images = [image.strip() for image in target_images]
angle = float(angle)
zoom = float(zoom)
translation_x = float(translation_x)
translation_y = float(translation_y)
iterations_per_frame = int(iterations_per_frame)
if zoom <= 0:
print(
f"WARNING: You have selected a zoom of {zoom}. "
"This is meaningless. "
"If you want to zoom out, use a value between 0 and 1. "
"If you want no zoom, use a value of 1."
)
args = argparse.Namespace(
prompts=text_prompts,
image_prompts=target_images,
noise_prompt_seeds=[],
noise_prompt_weights=[],
size=[width, height],
init_weight=0.,
clip_model='ViT-B/32',
vqgan_config=f'{model}.yaml',
vqgan_checkpoint=f'{model}.ckpt',
step_size=0.1,
cutn=64,
cut_pow=1.,
display_freq=interval,
seed=seed,
)
add_inbetweens()
path = f'{working_dir}/steps'
!rm -r {path}
!mkdir --parents {path}
#@title Actually do the run...
# Delete memory from previous runs
!nvidia-smi -caa
for var in ['device', 'model', 'perceptor', 'z']:
try:
del globals()[var]
except:
pass
try:
import gc
gc.collect()
except:
pass
try:
torch.cuda.empty_cache()
except:
pass
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
if not key_frames:
if text_prompts:
print('Using text prompts:', text_prompts)
if target_images:
print('Using image prompts:', target_images)
if args.seed is None:
seed = torch.seed()
else:
seed = args.seed
torch.manual_seed(seed)
print('Using seed:', seed)
model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)
perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)
cut_size = perceptor.visual.input_resolution
e_dim = model.quantize.e_dim
f = 2**(model.decoder.num_resolutions - 1)
make_cutouts = MakeCutouts(cut_size, args.cutn, cut_pow=args.cut_pow)
n_toks = model.quantize.n_e
toksX, toksY = args.size[0] // f, args.size[1] // f
sideX, sideY = toksX * f, toksY * f
z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]
z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]
stop_on_next_loop = False # Make sure GPU memory doesn't get corrupted from cancelling the run mid-way through, allow a full frame to complete
def read_image_workaround(path):
"""OpenCV reads images as BGR, Pillow saves them as RGB. Work around
this incompatibility to avoid colour inversions."""
im_tmp = cv2.imread(path)
return cv2.cvtColor(im_tmp, cv2.COLOR_BGR2RGB)
for i in range(max_frames):
if stop_on_next_loop:
break
if key_frames:
text_prompts = text_prompts_series[i]
text_prompts = [phrase.strip() for phrase in text_prompts.split("|")]
if text_prompts == ['']:
text_prompts = []
args.prompts = text_prompts
target_images = target_images_series[i]
if target_images == "None" or not target_images:
target_images = []
else:
target_images = target_images.split("|")
target_images = [image.strip() for image in target_images]
args.image_prompts = target_images
angle = angle_series[i]
zoom = zoom_series[i]
translation_x = translation_x_series[i]
translation_y = translation_y_series[i]
iterations_per_frame = iterations_per_frame_series[i]
print(
f'text_prompts: {text_prompts}',
f'image_prompts: {target_images}',
f'angle: {angle}',
f'zoom: {zoom}',
f'translation_x: {translation_x}',
f'translation_y: {translation_y}',
f'iterations_per_frame: {iterations_per_frame}'
)
try:
if i == 0 and initial_image != "":
img_0 = read_image_workaround(initial_image)
z, *_ = model.encode(TF.to_tensor(img_0).to(device).unsqueeze(0) * 2 - 1)
elif i == 0 and not os.path.isfile(f'{working_dir}/steps/{i:04d}.png'):
one_hot = F.one_hot(
torch.randint(n_toks, [toksY * toksX], device=device), n_toks
).float()
z = one_hot @ model.quantize.embedding.weight
z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2)
else:
if save_all_iterations:
img_0 = read_image_workaround(
f'{working_dir}/steps/{i:04d}_{iterations_per_frame}.png')
else:
img_0 = read_image_workaround(f'{working_dir}/steps/{i:04d}.png')
center = (1*img_0.shape[1]//2, 1*img_0.shape[0]//2)
trans_mat = np.float32(
[[1, 0, translation_x],
[0, 1, translation_y]]
)
rot_mat = cv2.getRotationMatrix2D( center, angle, zoom )
trans_mat = np.vstack([trans_mat, [0,0,1]])
rot_mat = np.vstack([rot_mat, [0,0,1]])
transformation_matrix = np.matmul(rot_mat, trans_mat)
img_0 = cv2.warpPerspective(
img_0,
transformation_matrix,
(img_0.shape[1], img_0.shape[0]),
borderMode=cv2.BORDER_WRAP
)
z, *_ = model.encode(TF.to_tensor(img_0).to(device).unsqueeze(0) * 2 - 1)
i += 1
z_orig = z.clone()
z.requires_grad_(True)
opt = optim.Adam([z], lr=args.step_size)
normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],
std=[0.26862954, 0.26130258, 0.27577711])
pMs = []
for prompt in args.prompts:
txt, weight, stop = parse_prompt(prompt)
embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for prompt in args.image_prompts:
path, weight, stop = parse_prompt(prompt)
img = resize_image(Image.open(path).convert('RGB'), (sideX, sideY))
batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))
embed = perceptor.encode_image(normalize(batch)).float()
pMs.append(Prompt(embed, weight, stop).to(device))
for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):
gen = torch.Generator().manual_seed(seed)
embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)
pMs.append(Prompt(embed, weight).to(device))
def synth(z):
z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1)
return clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1)
def add_xmp_data(filename):
imagen = ImgTag(filename=filename)
imagen.xmp.append_array_item(libxmp.consts.XMP_NS_DC, 'creator', 'VQGAN+CLIP', {"prop_array_is_ordered":True, "prop_value_is_array":True})
if args.prompts:
imagen.xmp.append_array_item(libxmp.consts.XMP_NS_DC, 'title', " | ".join(args.prompts), {"prop_array_is_ordered":True, "prop_value_is_array":True})
else:
imagen.xmp.append_array_item(libxmp.consts.XMP_NS_DC, 'title', 'None', {"prop_array_is_ordered":True, "prop_value_is_array":True})
imagen.xmp.append_array_item(libxmp.consts.XMP_NS_DC, 'i', str(i), {"prop_array_is_ordered":True, "prop_value_is_array":True})
imagen.xmp.append_array_item(libxmp.consts.XMP_NS_DC, 'model', model_name, {"prop_array_is_ordered":True, "prop_value_is_array":True})
imagen.xmp.append_array_item(libxmp.consts.XMP_NS_DC, 'seed',str(seed) , {"prop_array_is_ordered":True, "prop_value_is_array":True})
imagen.close()
def add_stegano_data(filename):
data = {
"title": " | ".join(args.prompts) if args.prompts else None,
"notebook": "VQGAN+CLIP",
"i": i,
"model": model_name,
"seed": str(seed),
}
lsb.hide(filename, json.dumps(data)).save(filename)
@torch.no_grad()
def checkin(i, losses):
losses_str = ', '.join(f'{loss.item():g}' for loss in losses)
tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')
out = synth(z)
TF.to_pil_image(out[0].cpu()).save('progress.png')
add_stegano_data('progress.png')
add_xmp_data('progress.png')
display.display(display.Image('progress.png'))
def save_output(i, img, suffix=None):
filename = \
f"{working_dir}/steps/{i:04}{'_' + suffix if suffix else ''}.png"
imageio.imwrite(filename, np.array(img))
add_stegano_data(filename)
add_xmp_data(filename)
def ascend_txt(i, save=True, suffix=None):
out = synth(z)
iii = perceptor.encode_image(normalize(make_cutouts(out))).float()
result = []
if args.init_weight:
result.append(F.mse_loss(z, z_orig) * args.init_weight / 2)
for prompt in pMs:
result.append(prompt(iii))
img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]
img = np.transpose(img, (1, 2, 0))
if save:
save_output(i, img, suffix=suffix)
return result
def train(i, save=True, suffix=None):
opt.zero_grad()
lossAll = ascend_txt(i, save=save, suffix=suffix)
if i % args.display_freq == 0 and save:
checkin(i, lossAll)
loss = sum(lossAll)
loss.backward()
opt.step()
with torch.no_grad():
z.copy_(z.maximum(z_min).minimum(z_max))
with tqdm() as pbar:
if iterations_per_frame == 0:
save_output(i, img_0)
j = 1
while True:
suffix = (str(j) if save_all_iterations else None)
if j >= iterations_per_frame:
train(i, save=True, suffix=suffix)
break
if save_all_iterations:
train(i, save=True, suffix=suffix)
else:
train(i, save=False, suffix=suffix)
j += 1
pbar.update()
except KeyboardInterrupt:
stop_on_next_loop = True
pass
###Output
_____no_output_____
###Markdown
Super-resolution with SRCNN (Option)
###Code
!git clone https://github.com/Mirwaisse/SRCNN.git
!curl https://raw.githubusercontent.com/chigozienri/SRCNN/master/models/model_2x.pth -o model_2x.pth
# @title Increase Resolution
# import subprocess in case this cell is run without the above cells
import subprocess
# Set zoomed = True if this cell is run
zoomed = True
init_frame = 1#@param {type:"number"}
last_frame = 60#@param {type:"number"}
for i in range(init_frame, last_frame + 1): #
filename = f"{i:04}.png"
cmd = [
'python',
'/content/SRCNN/run.py',
'--zoom_factor',
'2', # Note if you increase this, you also need to change the model.
'--model',
'/content/model_2x.pth', # 2x, 3x and 4x are available from the repo above
'--image',
filename,
'--cuda'
]
print(f'Upscaling frame {i}')
process = subprocess.Popen(cmd, cwd=f'{working_dir}/steps/')
stdout, stderr = process.communicate()
if process.returncode != 0:
print(stderr)
print(
"You may be able to avoid this error by backing up the frames,"
"restarting the notebook, and running only the video synthesis cells,"
"or by decreasing the resolution of the image generation steps. "
"If you restart the notebook, you will have to define the `filepath` manually"
"by adding `filepath = 'PATH_TO_THE_VIDEO'` to the beginning of this cell. "
"If these steps do not work, please post the traceback in the github."
)
raise RuntimeError(stderr)
###Output
_____no_output_____
###Markdown
Video creationGenerate a video from the saved frames. You can change the FPS, the first frame, the last frame, and so on. This step may fail due to an out-of-memory error.
###Code
# @title Create video
# import subprocess in case this cell is run without the above cells
import subprocess
# Try to avoid OOM errors
torch.cuda.empty_cache()
init_frame = 1#@param {type:"number"} This is the frame where the video will start
last_frame = 60#@param {type:"number"} You can change i to the number of the last frame you want to generate. It will raise an error if that number of frames does not exist.
fps = 12#@param {type:"number"}
try:
key_frames
except NameError:
filename = "video.mp4"
else:
if key_frames:
# key frame filename would be too long
filename = "video.mp4"
else:
filename = f"{'_'.join(text_prompts).replace(' ', '')}.mp4"
filepath = f'{working_dir}/{filename}'
frames = []
# tqdm.write('Generating video...')
try:
zoomed
except NameError:
image_path = f'{working_dir}/steps/%04d.png'
else:
image_path = f'{working_dir}/steps/zoomed_%04d.png'
cmd = [
'ffmpeg',
'-y',
'-vcodec',
'png',
'-r',
str(fps),
'-start_number',
str(init_frame),
'-i',
image_path,
'-c:v',
'libx264',
'-frames:v',
str(last_frame-init_frame),
'-vf',
f'fps={fps}',
'-pix_fmt',
'yuv420p',
'-crf',
'17',
'-preset',
'veryslow',
filepath
]
process = subprocess.Popen(cmd, cwd=f'{working_dir}/steps/', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
print(stderr)
print(
"You may be able to avoid this error by backing up the frames,"
"restarting the notebook, and running only the google drive/local connection and video synthesis cells,"
"or by decreasing the resolution of the image generation steps. "
"If these steps do not work, please post the traceback in the github."
)
raise RuntimeError(stderr)
else:
print("The video is ready")
# @title Download video
from google.colab import files
files.download(filepath)
###Output
_____no_output_____
###Markdown
Slow-motion video generation (Option)If this is run immediately after the step above, it may run out of memory. In that case, restart the notebook, upload a copy of the video saved in the previous step (or fetch it from Google Drive), and define the variable `filepath` with the path to that video before running the cell below again.
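The commented-out cell below is a small reminder sketch of that manual step (the path shown is only a hypothetical placeholder, not a file created by this notebook).
###Code
# Only needed after a runtime restart: point `filepath` at the video saved earlier.
# The path below is a hypothetical placeholder; replace it with your own.
# filepath = '/content/video.mp4'
###Output
_____no_output_____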
###Code
# @title Download Super-Slomo model
!git clone -q --depth 1 https://github.com/avinashpaliwal/Super-SloMo.git
from os.path import exists
def download_from_google_drive(file_id, file_name):
# download a file from the Google Drive link
!rm -f ./cookie
!curl -c ./cookie -s -L "https://drive.google.com/uc?export=download&id={file_id}" > /dev/null
confirm_text = !awk '/download/ {print $NF}' ./cookie
confirm_text = confirm_text[0]
!curl -Lb ./cookie "https://drive.google.com/uc?export=download&confirm={confirm_text}&id={file_id}" -o {file_name}
pretrained_model = 'SuperSloMo.ckpt'
if not exists(pretrained_model):
download_from_google_drive('1IvobLDbRiBgZr3ryCRrWL8xDbMZ-KnpF', pretrained_model)
# import subprocess in case this cell is run without the above cells
import subprocess
SLOW_MOTION_FACTOR = 3#@param {type:"number"}
TARGET_FPS = 12#@param {type:"number"}
cmd1 = [
'python',
'Super-SloMo/video_to_slomo.py',
'--checkpoint',
pretrained_model,
'--video',
filepath,
'--sf',
str(SLOW_MOTION_FACTOR),
'--fps',
str(TARGET_FPS),
'--output',
f'{filepath}-slomo.mkv',
]
process = subprocess.Popen(cmd1, cwd=f'/content', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
raise RuntimeError(stderr)
cmd2 = [
'ffmpeg',
'-i',
f'{filepath}-slomo.mkv',
'-pix_fmt',
'yuv420p',
'-crf',
'17',
'-preset',
'veryslow',
f'{filepath}-slomo.mp4',
]
process = subprocess.Popen(cmd2, cwd=f'/content', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
if process.returncode != 0:
    print(stderr)
    print(
        "You may be able to avoid this error by backing up the frames, "
        "restarting the notebook, and running only the video synthesis cells, "
        "or by decreasing the resolution of the image generation steps. "
        "If you restart the notebook, you will have to define the `filepath` manually "
        "by adding `filepath = 'PATH_TO_THE_VIDEO'` to the beginning of this cell. "
        "If these steps do not work, please post the traceback in the GitHub repo."
    )
    raise RuntimeError(stderr)
# @title Download video
from google.colab import files
files.download(f'{filepath}-slomo.mp4')
###Output
_____no_output_____ |
practice_3.ipynb | ###Markdown
Practice 3: Embeddings and AI**Team members:**1. Ceballos Equihua Conan Nathaniel2. Murrieta Villegas Alfonso3. Salas Mora Mónica 1. Libraries and Loading Dataset
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
Load the dataset obtained in the previous assignment (practice 2).
###Code
urlData = 'https://raw.githubusercontent.com/aMurryFly/nlp_course/main/data/embeddings.csv'
data = pd.read_csv(urlData, encoding='latin1')
data.head()
###Output
_____no_output_____
###Markdown
2. EDA (Exploratory Data Analysis) Drop index column - (Unnamed: 0)
###Code
data = data.drop(['Unnamed: 0'], axis='columns')
data.head()
###Output
_____no_output_____
###Markdown
Check for null or duplicate values
###Code
explordata= data
nullValues= data.isnull().sum().sort_values(ascending=False)
total =explordata.shape[0]
percent_missing= (explordata.isnull().sum()/total).sort_values(ascending=False)
loss_data= pd.concat([nullValues, percent_missing], axis=1, keys=['Datos nulos', 'Porcetaje'])
print (loss_data)
###Output
Datos nulos Porcetaje
embedding 0 0.0
tagged_document 0 0.0
clean_description 0 0.0
category 0 0.0
###Markdown
Number of records (documents)
###Code
print('Total de datos: ', explordata.shape[0])
###Output
Total de datos: 29950
###Markdown
Distribution of the news categories
###Code
pre_class_allData = explordata.groupby('category').count()['clean_description'].reset_index().sort_values(by='clean_description',ascending=False)
percent_class= pre_class_allData.clean_description
labels= pre_class_allData.category
my_pie,_,_ = plt.pie(percent_class,radius = 1.2,labels=labels,autopct="%.1f%%")
plt.setp(my_pie, width=0.8, edgecolor='white')
plt.show()
###Output
_____no_output_____
###Markdown
3. AI Model 3.1 Libraries and hyperparameters
###Code
from sklearn.model_selection import train_test_split
# General dependencies for the AI model
import tensorflow as tf
import tensorflow.keras.layers as L
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.optimizers import Adam
# For building the model - NLP
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding
# For model validation
from scipy.stats import norm
from scipy import stats as st
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from scipy import stats
data.head()
###Output
_____no_output_____
###Markdown
We map the classes to discrete numeric values (this is the input format the model expects).
###Code
re_classes = {'FOOD & DRINK': 0,
'WORLD NEWS': 1,
'POLITICS':2,
'PARENTING':3,
'WELLNESS': 4,
'BUSINESS': 5
}
labels = ['FOOD & DRINK', 'WORLD NEWS', 'POLITICS', 'PARENTING', 'WELLNESS','BUSINESS']
###Output
_____no_output_____
###Markdown
Hyperparameters and creation of the vector lists
###Code
X_init = data["clean_description"].copy()
y_init = data["category"].copy()
y_init.replace(re_classes, inplace=True)
vocab_size = len(X_init)
max_length = 150
trunc_type='post'
padding_type='post'
oov_tok = "<OOV>"
###Output
_____no_output_____
###Markdown
3.2 Data preprocessing Data split: 1. 80% for training 2. 10% for validation 3. 10% for testing
###Code
X_train, X_val, y_train, y_val = train_test_split(X_init, y_init, test_size=0.2, random_state=42)
X_val, X_test , y_val, y_test= train_test_split(X_val, y_val, test_size=0.5, random_state=42)
###Output
_____no_output_____
###Markdown
Preparing the data before embedding with GloVe: 1. The Keras tokenizer is used 2. The vectors are padded to a uniform length with Keras `pad_sequences` 3. The data is split into train, validation and test
###Code
tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words = vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(X_train)
word_index = tokenizer.word_index
X_train = tokenizer.texts_to_sequences(X_train)
X_train = pad_sequences(X_train,maxlen= max_length,padding=padding_type, truncating=trunc_type)
y_train = np.asarray(y_train)
y_train = pd.get_dummies(y_train)
X_val = tokenizer.texts_to_sequences(X_val)
X_val = pad_sequences(X_val,maxlen= max_length,padding=padding_type, truncating=trunc_type)
y_val = np.asarray(y_val)
y_val = pd.get_dummies(y_val)
train_set = np.array(X_train)
val_set = np.array(X_val)
train_label = np.array(y_train)
val_label = np.array(y_val)
y_test = pd.get_dummies(y_test)
y_test = np.asarray(y_test)
y_test = np.argmax(y_test,axis=1)
print(train_set.shape)
print(train_label.shape)
print(val_set.shape)
print(val_label.shape)
###Output
(23960, 150)
(23960, 6)
(2995, 150)
(2995, 6)
###Markdown
3.3 Glove NOTA: NO EJECUTAR SI YA ESTÁ DESCARGADO GLOVEDescarga y descompresión del modelo
###Code
!wget http://nlp.stanford.edu/data/glove.6B.zip #GLOVE
!unzip -q glove.6B.zip
###Output
_____no_output_____
###Markdown
GloVe is an unsupervised learning algorithm for obtaining vector representations for words. Path or directory of our model
###Code
dir_glove_file = './glove.6B.100d.txt'
###Output
_____no_output_____
###Markdown
Hyperparameters for GloVe
###Code
num_tokens = len(tokenizer.word_index.items()) + 2
embedding_dim = 100
hits = 0
misses = 0
embeddings_index = {}
with open(dir_glove_file) as f:
for line in f:
word, coefs = line.split(maxsplit=1)
coefs = np.fromstring(coefs, "f", sep=" ")
embeddings_index[word] = coefs
print("Found %s word vectors." % len(embeddings_index))
# Prepare embedding matrix
embedding_matrix = np.zeros((num_tokens, embedding_dim))
for word, i in tokenizer.word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# Words not found in embedding index will be all-zeros.
# This includes the representation for "padding" and "OOV"
embedding_matrix[i] = embedding_vector
hits += 1
else:
misses += 1
print("Converted %d words (%d misses)" % (hits, misses))
###Output
Converted 26500 words (4213 misses)
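###Markdown
As a quick sanity check of the loaded GloVe vectors (this cell is an illustrative sketch, not part of the original pipeline; the words "king", "queen" and "banana" are arbitrary examples assumed to be in the GloVe vocabulary), we can compare two embeddings with cosine similarity.
###Code
# Cosine similarity between two GloVe vectors taken from embeddings_index (built above)
def cosine_sim(w1, w2):
    v1, v2 = embeddings_index[w1], embeddings_index[w2]
    return float(np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)))
print(cosine_sim("king", "queen"))   # related words -> comparatively high similarity
print(cosine_sim("king", "banana"))  # unrelated words -> lower similarity
###Output
_____no_output_____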
###Markdown
3.4 Building AI Model
###Code
early_stop=tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3, min_delta=0.0001)
tf.keras.backend.clear_session()
embed_size = 100
model = tf.keras.Sequential([
Embedding(num_tokens,
embedding_dim,
embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix),
mask_zero=True,input_shape=[None],trainable=False),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(256, dropout = 0.4)),
tf.keras.layers.Dense(6, activation="softmax")
])
model.summary()
###Output
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
embedding (Embedding) (None, None, 100) 3071500
bidirectional (Bidirectiona (None, 512) 731136
l)
dense (Dense) (None, 6) 3078
=================================================================
Total params: 3,805,714
Trainable params: 734,214
Non-trainable params: 3,071,500
_________________________________________________________________
###Markdown
3.5 Training Model
###Code
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
###Output
_____no_output_____
###Markdown
Run on a GPU (up to 10x less training time)
###Code
history = model.fit( train_set,train_label,
batch_size = 32,
steps_per_epoch=len(X_train) // 32,
validation_data = (val_set , val_label),
validation_steps = len(val_set)//32, epochs=4,
callbacks= early_stop )
###Output
Epoch 1/4
748/748 [==============================] - 46s 45ms/step - loss: 0.9142 - accuracy: 0.6663 - val_loss: 0.7893 - val_accuracy: 0.7204
Epoch 2/4
748/748 [==============================] - 30s 40ms/step - loss: 0.7823 - accuracy: 0.7172 - val_loss: 0.7548 - val_accuracy: 0.7245
Epoch 3/4
748/748 [==============================] - 30s 40ms/step - loss: 0.7215 - accuracy: 0.7359 - val_loss: 0.7381 - val_accuracy: 0.7362
Epoch 4/4
748/748 [==============================] - 30s 40ms/step - loss: 0.6795 - accuracy: 0.7544 - val_loss: 0.7245 - val_accuracy: 0.7450
###Markdown
3.6 Accuracy: train vs validation TRAINING
###Code
plt.plot(history.history['accuracy'], label = "acc")
plt.plot(history.history['val_accuracy'],label = "val_acc")
plt.ylabel("accuracy")
plt.xlabel("epochs")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
LOSS
###Code
plt.plot(history.history['loss'], label = "loss")
plt.plot(history.history['val_loss'], label = "val_loss")
plt.ylabel("loss")
plt.xlabel("epochs")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
4. FINAL MODEL 4.1 Evaluation / Confusion Matrix
###Code
classes = data['category'].value_counts().index
def prediction(inference_data):
X = tokenizer.texts_to_sequences(inference_data)
X = pad_sequences(X,maxlen= max_length,padding=padding_type, truncating=trunc_type)
pred = model.predict(X)
pred_value = tf.argmax(pred,axis =1).numpy()
return pred_value
###Output
_____no_output_____
###Markdown
Evaluation / prediction on the test dataset - test F1
###Code
y_pred = prediction(X_test)
print(classification_report(np.asarray(y_test),np.asarray( y_pred)))
###Output
precision recall f1-score support
0 0.83 0.86 0.84 501
1 0.81 0.75 0.78 513
2 0.69 0.70 0.69 488
3 0.75 0.79 0.77 483
4 0.73 0.77 0.75 531
5 0.74 0.68 0.71 479
accuracy 0.76 2995
macro avg 0.76 0.76 0.76 2995
weighted avg 0.76 0.76 0.76 2995
###Markdown
Confusion matrix to validate per-class performance on the test dataset
###Code
confu_matrix = confusion_matrix(y_test, y_pred)
heatmap = sns.heatmap(confu_matrix, xticklabels=classes,
yticklabels=classes,
annot=True, fmt='d', color='blue')
plt.xlabel('Predicted class')
plt.ylabel('Real class')
###Output
_____no_output_____ |
python/d2l-en/mxnet/chapter_optimization/momentum.ipynb | ###Markdown
Momentum:label:`sec_momentum`In :numref:`sec_sgd` we reviewed what happens when performing stochastic gradient descent, i.e., when performing optimization where only a noisy variant of the gradient is available. In particular, we noticed that for noisy gradients we need to be extra cautious when it comes to choosing the learning rate in the face of noise. If we decrease it too rapidly, convergence stalls. If we are too lenient, we fail to converge to a good enough solution since noise keeps on driving us away from optimality. BasicsIn this section, we will explore more effective optimization algorithms, especially for certain types of optimization problems that are common in practice. Leaky AveragesThe previous section saw us discussing minibatch SGD as a means for accelerating computation. It also had the nice side-effect that averaging gradients reduced the amount of variance. The minibatch stochastic gradient descent can be calculated by:$$\mathbf{g}_{t, t-1} = \partial_{\mathbf{w}} \frac{1}{|\mathcal{B}_t|} \sum_{i \in \mathcal{B}_t} f(\mathbf{x}_{i}, \mathbf{w}_{t-1}) = \frac{1}{|\mathcal{B}_t|} \sum_{i \in \mathcal{B}_t} \mathbf{h}_{i, t-1}.$$To keep the notation simple, here we used $\mathbf{h}_{i, t-1} = \partial_{\mathbf{w}} f(\mathbf{x}_i, \mathbf{w}_{t-1})$ as the stochastic gradient descent for sample $i$ using the weights updated at time $t-1$.It would be nice if we could benefit from the effect of variance reduction even beyond averaging gradients on a minibatch. One option to accomplish this task is to replace the gradient computation by a "leaky average":$$\mathbf{v}_t = \beta \mathbf{v}_{t-1} + \mathbf{g}_{t, t-1}$$for some $\beta \in (0, 1)$. This effectively replaces the instantaneous gradient by one that's been averaged over multiple *past* gradients. $\mathbf{v}$ is called *momentum*. It accumulates past gradients similar to how a heavy ball rolling down the objective function landscape integrates over past forces. To see what is happening in more detail let us expand $\mathbf{v}_t$ recursively into$$\begin{aligned}\mathbf{v}_t = \beta^2 \mathbf{v}_{t-2} + \beta \mathbf{g}_{t-1, t-2} + \mathbf{g}_{t, t-1}= \ldots, = \sum_{\tau = 0}^{t-1} \beta^{\tau} \mathbf{g}_{t-\tau, t-\tau-1}.\end{aligned}$$Large $\beta$ amounts to a long-range average, whereas small $\beta$ amounts to only a slight correction relative to a gradient method. The new gradient replacement no longer points into the direction of steepest descent on a particular instance any longer but rather in the direction of a weighted average of past gradients. This allows us to realize most of the benefits of averaging over a batch without the cost of actually computing the gradients on it. We will revisit this averaging procedure in more detail later.The above reasoning formed the basis for what is now known as *accelerated* gradient methods, such as gradients with momentum. They enjoy the additional benefit of being much more effective in cases where the optimization problem is ill-conditioned (i.e., where there are some directions where progress is much slower than in others, resembling a narrow canyon). Furthermore, they allow us to average over subsequent gradients to obtain more stable directions of descent. Indeed, the aspect of acceleration even for noise-free convex problems is one of the key reasons why momentum works and why it works so well.As one would expect, due to its efficacy momentum is a well-studied subject in optimization for deep learning and beyond. 
See e.g., the beautiful [expository article](https://distill.pub/2017/momentum/) by :cite:`Goh.2017` for an in-depth analysis and interactive animation. It was proposed by :cite:`Polyak.1964`. :cite:`Nesterov.2018` has a detailed theoretical discussion in the context of convex optimization. Momentum in deep learning has been known to be beneficial for a long time. See e.g., the discussion by :cite:`Sutskever.Martens.Dahl.ea.2013` for details. An Ill-conditioned ProblemTo get a better understanding of the geometric properties of the momentum method we revisit gradient descent, albeit with a significantly less pleasant objective function. Recall that in :numref:`sec_gd` we used $f(\mathbf{x}) = x_1^2 + 2 x_2^2$, i.e., a moderately distorted ellipsoid objective. We distort this function further by stretching it out in the $x_1$ direction via$$f(\mathbf{x}) = 0.1 x_1^2 + 2 x_2^2.$$As before $f$ has its minimum at $(0, 0)$. This function is *very* flat in the direction of $x_1$. Let us see what happens when we perform gradient descent as before on this new function. We pick a learning rate of $0.4$.
###Code
%matplotlib inline
from mxnet import np, npx
from d2l import mxnet as d2l
npx.set_np()
eta = 0.4
def f_2d(x1, x2):
return 0.1 * x1 ** 2 + 2 * x2 ** 2
def gd_2d(x1, x2, s1, s2):
return (x1 - eta * 0.2 * x1, x2 - eta * 4 * x2, 0, 0)
d2l.show_trace_2d(f_2d, d2l.train_2d(gd_2d))
###Output
epoch 20, x1: -0.943467, x2: -0.000073
###Markdown
By construction, the gradient in the $x_2$ direction is *much* higher and changes much more rapidly than in the horizontal $x_1$ direction. Thus we are stuck between two undesirable choices: if we pick a small learning rate we ensure that the solution does not diverge in the $x_2$ direction but we are saddled with slow convergence in the $x_1$ direction. Conversely, with a large learning rate we progress rapidly in the $x_1$ direction but diverge in $x_2$. The example below illustrates what happens even after a slight increase in learning rate from $0.4$ to $0.6$. Convergence in the $x_1$ direction improves but the overall solution quality is much worse.
###Code
eta = 0.6
d2l.show_trace_2d(f_2d, d2l.train_2d(gd_2d))
###Output
epoch 20, x1: -0.387814, x2: -1673.365109
###Markdown
The Momentum MethodThe momentum method allows us to solve the gradient descent problem describedabove. Looking at the optimization trace above we might intuit that averaging gradients over the past would work well. After all, in the $x_1$ direction this will aggregate well-aligned gradients, thus increasing the distance we cover with every step. Conversely, in the $x_2$ direction where gradients oscillate, an aggregate gradient will reduce step size due to oscillations that cancel each other out.Using $\mathbf{v}_t$ instead of the gradient $\mathbf{g}_t$ yields the following update equations:$$\begin{aligned}\mathbf{v}_t &\leftarrow \beta \mathbf{v}_{t-1} + \mathbf{g}_{t, t-1}, \\\mathbf{x}_t &\leftarrow \mathbf{x}_{t-1} - \eta_t \mathbf{v}_t.\end{aligned}$$Note that for $\beta = 0$ we recover regular gradient descent. Before delving deeper into the mathematical properties let us have a quick look at how the algorithm behaves in practice.
###Code
def momentum_2d(x1, x2, v1, v2):
v1 = beta * v1 + 0.2 * x1
v2 = beta * v2 + 4 * x2
return x1 - eta * v1, x2 - eta * v2, v1, v2
eta, beta = 0.6, 0.5
d2l.show_trace_2d(f_2d, d2l.train_2d(momentum_2d))
###Output
epoch 20, x1: 0.007188, x2: 0.002553
###Markdown
As we can see, even with the same learning rate that we used before, momentum still converges well. Let us see what happens when we decrease the momentum parameter. Halving it to $\beta = 0.25$ leads to a trajectory that barely converges at all. Nonetheless, it is a lot better than without momentum (when the solution diverges).
###Code
eta, beta = 0.6, 0.25
d2l.show_trace_2d(f_2d, d2l.train_2d(momentum_2d))
###Output
epoch 20, x1: -0.126340, x2: -0.186632
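###Markdown
As an aside (an illustrative sketch added here, not part of the original chapter text), the variance-reduction effect of the leaky average $\mathbf{v}_t = \beta \mathbf{v}_{t-1} + \mathbf{g}_{t, t-1}$ can be checked on a noisy scalar "gradient": rescaled by $(1-\beta)$ so that it lives on the same scale as a single gradient, the averaged sequence fluctuates far less than the raw one.
###Code
# Minimal scalar sketch of a leaky average; the true gradient is 1.0, the noise is standard normal
import random
random.seed(0)
beta = 0.9
v = 0.0
noisy, smoothed = [], []
for t in range(200):
    g = 1.0 + random.gauss(0, 1)      # noisy gradient observation
    v = beta * v + g                  # leaky average (momentum buffer)
    noisy.append(g)
    smoothed.append((1 - beta) * v)   # rescale so the weights sum to roughly one
def variance(xs):
    m = sum(xs) / len(xs)
    return sum((x - m) ** 2 for x in xs) / len(xs)
print(f'variance of raw gradients: {variance(noisy):.3f}')
print(f'variance of leaky average: {variance(smoothed):.3f}')
###Output
_____no_output_____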
###Markdown
Note that we can combine momentum with stochastic gradient descent and in particular, minibatch stochastic gradient descent. The only change is that in that case we replace the gradients $\mathbf{g}_{t, t-1}$ with $\mathbf{g}_t$. Last, for convenience we initialize $\mathbf{v}_0 = 0$ at time $t=0$. Let us look at what leaky averaging actually does to the updates. Effective Sample WeightRecall that $\mathbf{v}_t = \sum_{\tau = 0}^{t-1} \beta^{\tau} \mathbf{g}_{t-\tau, t-\tau-1}$. In the limit the terms add up to $\sum_{\tau=0}^\infty \beta^\tau = \frac{1}{1-\beta}$. In other words, rather than taking a step of size $\eta$ in gradient descent or stochastic gradient descent we take a step of size $\frac{\eta}{1-\beta}$ while at the same time, dealing with a potentially much better behaved descent direction. These are two benefits in one. To illustrate how weighting behaves for different choices of $\beta$ consider the diagram below.
###Code
d2l.set_figsize()
betas = [0.95, 0.9, 0.6, 0]
for beta in betas:
x = np.arange(40).asnumpy()
d2l.plt.plot(x, beta ** x, label=f'beta = {beta:.2f}')
d2l.plt.xlabel('time')
d2l.plt.legend();
###Output
_____no_output_____
###Markdown
Practical ExperimentsLet us see how momentum works in practice, i.e., when used within the context of a proper optimizer. For this we need a somewhat more scalable implementation. Implementation from ScratchCompared with (minibatch) stochastic gradient descent the momentum method needs to maintain a set of auxiliary variables, i.e., velocity. It has the same shape as the gradients (and variables of the optimization problem). In the implementation below we call these variables `states`.
###Code
def init_momentum_states(feature_dim):
v_w = np.zeros((feature_dim, 1))
v_b = np.zeros(1)
return (v_w, v_b)
def sgd_momentum(params, states, hyperparams):
for p, v in zip(params, states):
v[:] = hyperparams['momentum'] * v + p.grad
p[:] -= hyperparams['lr'] * v
###Output
_____no_output_____
###Markdown
Let us see how this works in practice.
###Code
def train_momentum(lr, momentum, num_epochs=2):
d2l.train_ch11(sgd_momentum, init_momentum_states(feature_dim),
{'lr': lr, 'momentum': momentum}, data_iter,
feature_dim, num_epochs)
data_iter, feature_dim = d2l.get_data_ch11(batch_size=10)
train_momentum(0.02, 0.5)
###Output
loss: 0.250, 0.099 sec/epoch
###Markdown
When we increase the momentum hyperparameter `momentum` to 0.9, it amounts to a significantly larger effective sample size of $\frac{1}{1 - 0.9} = 10$. We reduce the learning rate slightly to $0.01$ to keep matters under control.
###Code
train_momentum(0.01, 0.9)
###Output
loss: 0.244, 0.076 sec/epoch
###Markdown
Reducing the learning rate further addresses any issue of non-smooth optimization problems. Setting it to $0.005$ yields good convergence properties.
###Code
train_momentum(0.005, 0.9)
###Output
loss: 0.247, 0.106 sec/epoch
###Markdown
Concise ImplementationThere is very little to do in Gluon since the standard `sgd` solver already had momentum built in. Setting matching parameters yields a very similar trajectory.
###Code
d2l.train_concise_ch11('sgd', {'learning_rate': 0.005, 'momentum': 0.9},
data_iter)
###Output
loss: 0.248, 0.059 sec/epoch
###Markdown
Theoretical AnalysisSo far the 2D example of $f(x) = 0.1 x_1^2 + 2 x_2^2$ seemed rather contrived. We will now see that this is actually quite representative of the types of problem one might encounter, at least in the case of minimizing convex quadratic objective functions. Quadratic Convex FunctionsConsider the function$$h(\mathbf{x}) = \frac{1}{2} \mathbf{x}^\top \mathbf{Q} \mathbf{x} + \mathbf{x}^\top \mathbf{c} + b.$$This is a general quadratic function. For positive definite matrices $\mathbf{Q} \succ 0$, i.e., for matrices with positive eigenvalues this has a minimizer at $\mathbf{x}^* = -\mathbf{Q}^{-1} \mathbf{c}$ with minimum value $b - \frac{1}{2} \mathbf{c}^\top \mathbf{Q}^{-1} \mathbf{c}$. Hence we can rewrite $h$ as$$h(\mathbf{x}) = \frac{1}{2} (\mathbf{x} - \mathbf{Q}^{-1} \mathbf{c})^\top \mathbf{Q} (\mathbf{x} - \mathbf{Q}^{-1} \mathbf{c}) + b - \frac{1}{2} \mathbf{c}^\top \mathbf{Q}^{-1} \mathbf{c}.$$The gradient is given by $\partial_{\mathbf{x}} f(\mathbf{x}) = \mathbf{Q} (\mathbf{x} - \mathbf{Q}^{-1} \mathbf{c})$. That is, it is given by the distance between $\mathbf{x}$ and the minimizer, multiplied by $\mathbf{Q}$. Consequently also the momentum is a linear combination of terms $\mathbf{Q} (\mathbf{x}_t - \mathbf{Q}^{-1} \mathbf{c})$.Since $\mathbf{Q}$ is positive definite it can be decomposed into its eigensystem via $\mathbf{Q} = \mathbf{O}^\top \boldsymbol{\Lambda} \mathbf{O}$ for an orthogonal (rotation) matrix $\mathbf{O}$ and a diagonal matrix $\boldsymbol{\Lambda}$ of positive eigenvalues. This allows us to perform a change of variables from $\mathbf{x}$ to $\mathbf{z} := \mathbf{O} (\mathbf{x} - \mathbf{Q}^{-1} \mathbf{c})$ to obtain a much simplified expression:$$h(\mathbf{z}) = \frac{1}{2} \mathbf{z}^\top \boldsymbol{\Lambda} \mathbf{z} + b'.$$Here $b' = b - \frac{1}{2} \mathbf{c}^\top \mathbf{Q}^{-1} \mathbf{c}$. Since $\mathbf{O}$ is only an orthogonal matrix this does not perturb the gradients in a meaningful way. Expressed in terms of $\mathbf{z}$ gradient descent becomes$$\mathbf{z}_t = \mathbf{z}_{t-1} - \boldsymbol{\Lambda} \mathbf{z}_{t-1} = (\mathbf{I} - \boldsymbol{\Lambda}) \mathbf{z}_{t-1}.$$The important fact in this expression is that gradient descent *does not mix* between different eigenspaces. That is, when expressed in terms of the eigensystem of $\mathbf{Q}$ the optimization problem proceeds in a coordinate-wise manner. This also holds for momentum.$$\begin{aligned}\mathbf{v}_t & = \beta \mathbf{v}_{t-1} + \boldsymbol{\Lambda} \mathbf{z}_{t-1} \\\mathbf{z}_t & = \mathbf{z}_{t-1} - \eta \left(\beta \mathbf{v}_{t-1} + \boldsymbol{\Lambda} \mathbf{z}_{t-1}\right) \\ & = (\mathbf{I} - \eta \boldsymbol{\Lambda}) \mathbf{z}_{t-1} - \eta \beta \mathbf{v}_{t-1}.\end{aligned}$$In doing this we just proved the following theorem: Gradient Descent with and without momentum for a convex quadratic function decomposes into coordinate-wise optimization in the direction of the eigenvectors of the quadratic matrix. Scalar FunctionsGiven the above result let us see what happens when we minimize the function $f(x) = \frac{\lambda}{2} x^2$. For gradient descent we have$$x_{t+1} = x_t - \eta \lambda x_t = (1 - \eta \lambda) x_t.$$Whenever $|1 - \eta \lambda| < 1$ this iteration converges exponentially fast, since after $t$ steps we have $x_t = (1 - \eta \lambda)^t x_0$; convergence thus requires $0 < \eta \lambda < 2$, and for $\eta \lambda > 2$ the optimization problem diverges.
###Code
lambdas = [0.1, 1, 10, 19]
eta = 0.1
d2l.set_figsize((6, 4))
for lam in lambdas:
t = np.arange(20).asnumpy()
d2l.plt.plot(t, (1 - eta * lam) ** t, label=f'lambda = {lam:.2f}')
d2l.plt.xlabel('time')
d2l.plt.legend();
###Output
_____no_output_____ |
archive/NASA_data/tableau_pixel_info.ipynb | ###Markdown
Pixelate mapThis notebook mainly draws a pixelated map by formatting a JSON file.
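For orientation, the target output is a GeoJSON `FeatureCollection` in which every 0.25 x 0.25 degree pixel becomes one `Polygon` feature. The cell below is a minimal sketch of a single such feature with made-up coordinates and property values (not taken from the NASA data).
###Code
import json
# One hypothetical pixel feature; all numbers are placeholders, not real data
example_feature = {
    "type": "Feature",
    "geometry": {
        "type": "Polygon",
        "coordinates": [[[-100.125, 40.125], [-100.125, 40.375],
                         [-99.875, 40.375], [-99.875, 40.125],
                         [-100.125, 40.125]]],
    },
    "properties": {"EV Zone": "40-50", "Score": 42.0},
}
print(json.dumps({"type": "FeatureCollection", "features": [example_feature]}, indent=2)[:200])
###Output
_____no_output_____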
###Code
import json
import os
import netCDF4 as nc
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Get some data for the mapBefore we draw the map, we need to have some data. Here I just copy-paste code to count the max consecutive MPID, calculate an EV score, and calculate the maximum daily range loss. Adding more data is trivial if you follow the same paradigm of formatting the data. Define some constants
###Code
DATA_FILE_DIR = "./data/nasa/"
START_YEAR, END_YEAR = 2010, 2020
NUM_OF_YEARS = END_YEAR - START_YEAR
NUM_OF_MONTHS = 12
NUM_OF_DAYS = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}
###Output
_____no_output_____
###Markdown
The source data format is `netCDF4`. First, we need to use any of the source files to extract the latitudes and longitudes.
###Code
file = nc.Dataset(DATA_FILE_DIR+'20110101.nc4')
lat = file.variables['lat'][:].filled()
lon = file.variables['lon'][:].filled()
LON = len(lon)
LAT = len(lat)
# we will use this mask later
mask = file.variables['AvgSurfT_tavg'][0].mask
# remember to close opened files after use
file.close()
def get_tmp(filepath):
"""
This function extracts temperature data from the given filepath
# Arguements:
filepath: A string that specifies the file to be read
# Returns:
The data temperature in the file
"""
assert os.path.isfile(filepath), '{} does not exist!'.format(filepath)
file = nc.Dataset(filepath)
temperature = file.variables['AvgSurfT_tavg'][0]
file.close()
return temperature.filled(273.15)
###Output
_____no_output_____
###Markdown
Data 1: maximum consecutive MPID (must plug-in day) Algorithm: Keep two counters. One records the current consecutive MPID, and the other records the max consecutive MPID seen so far. After counting one day's MPID, compare the two and keep the larger one. Use `np.where` to adapt this algorithm to whole arrays. Data 2: average and maximum daily range loss
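Before the full loop, here is a toy sketch of the vectorised two-counter trick on a two-pixel grid with made-up temperatures (not the NASA data).
###Code
# Toy sketch: longest streak of days below 253.15 K, tracked per pixel with np.where
daily_temps = np.array([[250.0, 260.0],
                        [249.0, 251.0],
                        [248.0, 252.0],
                        [260.0, 250.0],
                        [247.0, 249.0]])
curr = np.zeros(2)
best = np.zeros(2)
for day in daily_temps:
    curr = np.where(day < 253.15, curr + 1, 0)  # extend or reset the running streak
    best = np.where(curr > best, curr, best)    # keep the longest streak seen so far
print(best)  # [3. 4.]
###Output
_____no_output_____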
###Code
%%time
# read the scale factors
scale_factors = pd.read_csv("./data/fitted_factors.csv")
percent_loss = scale_factors["Range Loss"].to_numpy()
# max_MPID will record the max MPID of all places in a year
max_MPID = np.ndarray(shape=(LAT, LON))
# curr_MPID will count the consecutive MPID we have seen so far (the counter)
curr_MPID = np.ndarray(shape=(LAT, LON))
# each_year_avg_loss will record each year's avgerage daily range loss
each_year_avg_loss = np.zeros(shape=(NUM_OF_YEARS, LAT, LON))
# max_loss will record the maximum daily range loss
max_loss = np.zeros(shape=(LAT, LON))
for year in range(START_YEAR, END_YEAR):
print(year, end=' ')
# keep track of the number of days
i = 0
# yearly_loss will record each day's range loss of this year
yearly_loss = np.zeros(shape=(365, LAT, LON))
for month in range(1, NUM_OF_MONTHS+1):
for day in range(1, NUM_OF_DAYS[month]+1):
date = "{}{:02d}{:02d}".format(year, month, day)
filepath = DATA_FILE_DIR + date + '.nc4'
date_temp = get_tmp(filepath)
# if this place has MPID on this day (temp<253.15K), then curr_MPID+1
# else, this place has no MPID on this day, which means not consecutive, so we reset the counter to 0;
curr_MPID = np.where(date_temp<253.15, curr_MPID+1, 0) # 253.15 K = -20 oC
# this is equivalent to A = max(A, B)
max_MPID = np.where(curr_MPID>max_MPID, curr_MPID, max_MPID)
# get the range loss of each day in this year
date_temp = np.round(date_temp-273.15, decimals=1) # convert to oC
# use the temperature difference as index. e.g. if temperature is -12.5 oC, then its range loss will
# be the (-12.5+100)*10=875th element in the percent_loss array
# "+100" means "-(-100)", "*10" means "/0.1"
index = (date_temp+100)*10
yearly_loss[i] = percent_loss[index.astype(int)]
i += 1
# calculate the yearly average daily range loss
each_year_avg_loss[year-START_YEAR] = yearly_loss.mean(axis=0)
# calculate the yearly maximum daily range loss
# NOTE: range loss is a negative value, so we use min()
yearly_max_loss = yearly_loss.min(axis=0)
max_loss = np.where(yearly_max_loss<max_loss, yearly_max_loss, max_loss)
print('Finished!\n')
###Output
2010 2011 2012 2013 2014 2015 2016 2017 2018 2019 Finished!
Wall time: 5min 51s
###Markdown
Data 3: EV score EV score is simply converted from daily range loss
###Code
# get the daily average range loss
avg_loss = each_year_avg_loss.mean(axis=0)
# round the maximum daily range loss to 1 decimal
max_loss = np.round(max_loss, decimals=1)
# calculate score and round it to 1 decimal
avg_score = (avg_loss - avg_loss.min()) / (avg_loss.max() - avg_loss.min()) * 100
avg_score = np.round(avg_score, decimals=1)
###Output
_____no_output_____
###Markdown
Build the map with pixels
###Code
def get_zone(score):
"""
A simple function to return the corresponding zone of the given score
"""
assert 0 <= score <= 100, "Score {} is out of valid range [0, 100]".format(score)
if score < 10:
return "0-10"
elif score < 20:
return "10-20"
elif score < 30:
return "20-30"
elif score < 40:
return "30-40"
elif score < 50:
return "40-50"
elif score < 60:
return "50-60"
elif score < 70:
return "60-70"
elif score < 80:
return "70-80"
elif score < 90:
return "80-90"
else:
return "90-100"
###Output
_____no_output_____
###Markdown
Format a JSON file Each pixel is a rectangle shape (Polygon)
###Code
%%time
json_file = {'features':[], 'type': 'FeatureCollection'}
for i in range(LAT):
for j in range(LON):
if (not mask[i, j]):
feature = {'geometry':{'coordinates':[], 'type': 'Polygon'},
'properties':{},
'type': 'Feature'}
# the shape boundary of one pixel. It is represented in a list
pixel = [[[lon[j]-.125,lat[i]-.125],
[lon[j]-.125,lat[i]+.125],
[lon[j]+.125,lat[i]+.125],
[lon[j]+.125,lat[i]-.125],
[lon[j]-.125,lat[i]-.125]]]
feature['geometry']['coordinates'] = pixel
# if you want to add more information/values just follow this template:
# feature['properties']['YOUR FEATURE NAME'] = YOUR VALUE
feature['properties']['EV Zone'] = get_zone(avg_score[i,j])
feature['properties']['Score'] = avg_score[i,j]
feature['properties']['Max consecutive MPID'] = int(max_MPID[i,j])
feature['properties']['Max daily range loss'] = max_loss[i,j]
# append this pixel to the feature list
json_file['features'].append(feature)
###Output
Wall time: 11.5 s
###Markdown
save the map in json format
###Code
os.makedirs("./geojson_files", exist_ok=True)
with open('./geojson_files/pixel_data_1.json', 'w') as outfile:
json.dump(json_file, outfile)
###Output
_____no_output_____ |
Python Files/Formulation.ipynb | ###Markdown
1. Co-batsman runs function
###Code
def co_batsman(player_name_list,runs_scored):
all_players=(player_name_list)
data=[]
n=len(player_name_list)
for i,j in enumerate(all_players):
score=0
score=runs_scored[i]
data.append([j,score])
total_runs=sum(runs_scored)
runs1=[]
for i in range(0,len(data)):
runs1.append(data[i][1])
co_bats=[]
for i in runs1:
co_bats.append(total_runs-i)
for i in range(0,len(data)):
data[i].append(co_bats[i])
return (co_bats)
###Output
_____no_output_____
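###Markdown
A quick usage sketch with hypothetical players and scores (not from the dataset): each co-batsman total is simply the team total minus that player's own runs.
###Code
# Hypothetical example: team total = 100, so the co-batsman runs are [70, 50, 80]
co_batsman(['Player A', 'Player B', 'Player C'], [30, 50, 20])
###Output
_____no_output_____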
###Markdown
2. Co-batsman Average function
###Code
def co_batsman_avg(player_name_list,fow_batting):
batt_num=[]
sum=0
n=len(player_name_list)
wicket_fallen=[0]*(n)
for i in range(0,len(fow_batting)):
if(fow_batting[i]>-1):
wicket_fallen[i]=1
sum+=wicket_fallen[i]
for i in range(0,len(wicket_fallen)):
wicket_fallen[i]=sum-wicket_fallen[i]
score=[]
for i,j in enumerate(wicket_fallen):
score.append(player_name_list[i]/j)
return (score)
###Output
_____no_output_____
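###Markdown
A usage sketch with hypothetical values: despite its parameter name, the first argument appears to take the co-batsman run totals (for example the output of `co_batsman`), and the second the fall-of-wicket batting numbers, where -1 marks a not-out innings.
###Code
# Hypothetical example: two wickets fell in total, the third batsman was not out (-1)
co_batsman_avg([70, 50, 80], [1, 2, -1])  # expected: [70.0, 50.0, 40.0]
###Output
_____no_output_____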
###Markdown
3. Co-batsman Strike Rate (CSR) function
###Code
def co_batsman_strike(player_name,ball_face,co_bat_score):
all_players=player_name
ball_faced=[]
n=len(player_name)
sum_balls=0
score=0
for i,j in enumerate(all_players):
        score=ball_face[i]
ball_faced.append([j,score])
sum_balls+=score
ball_face=[]
for i in ball_faced:
ball_face.append(sum_balls-i[1])
final_ball_faced=[]
for i,j in enumerate(co_bat_score):
final_ball_faced.append((j/ball_face[i])*100)
return(final_ball_faced)
###Output
_____no_output_____
###Markdown
4. Team Average function
###Code
def team_average(data_score):
match_ids=list(set(data_score["Match_id"]))
team_average=[]
for i in sorted(match_ids):
id=i
one_inn=0
two_inn=0
one_wicket_fall=1
two_wicket_fall=1
for x,j in enumerate(data_score["Match_id"]):
if(i==j):
if(data_score["Innings"][x]==1):
one_inn+=data_score["Runs_Scored"][x]
if(data_score["FOW_batting_number"][x]>one_wicket_fall):
one_wicket_fall=data_score["FOW_batting_number"][x]
else:
two_inn+=data_score["Runs_Scored"][x]
if(data_score["FOW_batting_number"][x]>two_wicket_fall):
two_wicket_fall=data_score["FOW_batting_number"][x]
# print(id,one_inn,two_inn,one_wicket_fall,two_wicket_fall)
# print((one_inn/one_wicket_fall),(two_inn/two_wicket_fall))
# print("---------------------")
team_average.append([id,(one_inn/one_wicket_fall),(two_inn/two_wicket_fall)])
plt_list=[]
for i,j in enumerate(data_score["Player_name"]):
temp_play=[]
score=0
lenght=0
for x,player_search in enumerate(data_score["Player_name"]):
if(j==player_search):
#checking
if(j not in temp_play):
temp_play.append(j)
match_id=data_score["Match_id"][x]
inn=data_score["Innings"][x]
for c in team_average:
if(c[0]==match_id ):
score+=c[inn]
lenght+=1
# print(j,player_search,c[inn])
# print(temp_play,score/lenght)
# print("--------------")
plt_list.append([temp_play,score/lenght])
play=[]
avg=[]
for i in plt_list:
for j in (i[0]):
play.append(j)
for i in plt_list:
avg.append(i[1])
tem_avg=[]
for i in range(0,len(play)):
tem_avg.append(avg[i])
return (tem_avg)
###Output
_____no_output_____
###Markdown
5. Team Strike Rate function
###Code
def team_strike_rate(data_score):
team_strike=[]
for i in (data_score["Match_id"]):
id=i
one_inn=0
two_inn=0
one_wicket_fall=1
two_wicket_fall=1
one_balls=1
two_balls=1
for x,j in enumerate(data_score["Match_id"]):
if(i==j):
if(data_score["Innings"][x]==1):
one_inn+=data_score["Runs_Scored"][x]
if(data_score["FOW_batting_number"][x]>one_wicket_fall):
one_wicket_fall=data_score["FOW_batting_number"][x]
one_balls+=data_score["Balls_faced"][x]
else:
two_inn+=data_score["Runs_Scored"][x]
if(data_score["FOW_batting_number"][x]>two_wicket_fall):
two_wicket_fall=data_score["FOW_batting_number"][x]
two_balls+=data_score["Balls_faced"][x]
team_strike.append([id,(one_inn/one_balls)*100,(two_inn/two_balls)*100])
str_rate_list=[]
str1=[]
for i,j in enumerate(data_score["Player_name"]):
temp_play=[]
score=0
lenght=0
for x,player_search in enumerate(data_score["Player_name"]):
if(j==player_search):
if(j not in temp_play):
temp_play.append(j)
match_id=data_score["Match_id"][x]
inn=data_score["Innings"][x]
for c in team_strike:
if(c[0]==match_id ):
score+=c[inn]
lenght+=1
str_rate_list.append([temp_play,score/lenght])
str1.append(score/lenght)
return str1
###Output
_____no_output_____
###Markdown
6. Team Win/Loss Ratio (TW/L) function
###Code
def team_wl(data_score,infocard):
win_data=[]
for i,j in enumerate(infocard["Match_id"]):
team_id=j
one_win=0
if(infocard["Winner"][i]==infocard["Team1"][i]):
one_win=1
elif(infocard["Winner"][i]==infocard["Team2"][i]):
one_win=2
else:
one_win=3
win_data.append([team_id,one_win])
match_win=[]
for i,j in enumerate(data_score["Player_name"]):
player=j
win=0
lose=0
tie=0
for x,player_search in enumerate(data_score["Player_name"]):
match_id=data_score["Match_id"][x]
match_inn=data_score["Innings"][x]
if(j==player_search):
# print(match_id,match_inn,player,player_search)
for z in win_data:
id_m=z[0]
inn_m=z[1]
if(match_id==id_m):
if(inn_m==3):
tie+=1
elif(inn_m!=match_inn):
lose+=1
else:
win+=1
# print(player,win,lose,tie)
match_win.append([player,win,lose,tie])
decision=[]
for i,j in enumerate(infocard["Toss_decision"]):
        if(infocard["Toss_decision"][i]=='bat'):
decision.append([infocard["Match_id"][i],1])
else:
decision.append([infocard["Match_id"][i],2])
for i in range(0,len(decision)):
if(str(infocard["Winner"][i])=="nan"):
decision[i][1]=3
elif(str(infocard["Toss_winner"][i])!=str(infocard["Winner"][i])):
if(decision[i][1]==1):
decision[i][1]=2
else:
decision[i][1]=1
match_win=[]
for i,j in enumerate(data_score["Player_name"]):
player=j
win=0
lose=0
tie=0
for x,player_search in enumerate(data_score["Player_name"]):
match_id=data_score["Match_id"][x]
match_inn=data_score["Innings"][x]
if(j==player_search):
# print(match_id,match_inn,player,player_search)
for z in decision:
id_m=z[0]
inn_m=z[1]
if(match_id==id_m):
# print(match_id,player,inn_m)
if(inn_m==3):
tie+=1
elif(inn_m!=match_inn):
lose+=1
else:
win+=1
# print(player,win,lose,tie)
match_win.append([player,win,lose,tie])
win_los=[]
total_match=[]
for i in match_win:
win_los.append(i[1]+i[2])
total_match.append(i[1]+i[2]+i[3])
final_win_lose=[]
for i,j in enumerate(match_win):
# print(i,j)
if(j[2]==0):
fo1= j[1]/1
# print(fo1)
else:
fo1= j[1]/j[2]
# print(fo1)
# print("------------")
final_win_lose.append(fo1)
return (final_win_lose)
###Output
_____no_output_____
###Markdown
7. Opposite Teams Average (OTA) function
###Code
def opp_team_average(data_score):
one_in_wicket=0
two_in_wicket=0
for i in range(0,len(data_score["Runs_Scored"])):
if(data_score["Innings"][i]==1):
if(data_score["FOW_batting_number"][i] > -1):
one_in_wicket+=1
else:
if(data_score["FOW_batting_number"][i] > -1):
two_in_wicket+=1
final=[]
for i in range(0,len(data_score["Runs_Scored"])):
if(data_score["Innings"][i]==1):
a=data_score["Runs_Scored"][i]/two_in_wicket
else:
a=data_score["Runs_Scored"][i]/one_in_wicket
if(str(a)=='inf'):
a=0
final.append([data_score["Player_name"][i],a])
return (final)
###Output
_____no_output_____
###Markdown
8. Opposite Teams Strike Rate (OTSR)
###Code
def oppo_team_strike(data_score):
team_strike=[]
match_ids=list(set(data_score["Match_id"]))
for i in (match_ids):
id=i
one_inn=0
two_inn=0
one_wicket_fall=1
two_wicket_fall=1
one_balls=1
two_balls=1
for x,j in enumerate(data_score["Match_id"]):
if(i==j):
if(data_score["Innings"][x]==1):
one_inn+=data_score["Runs_Scored"][x]
if(data_score["FOW_batting_number"][x]>one_wicket_fall):
one_wicket_fall=data_score["FOW_batting_number"][x]
one_balls+=data_score["Balls_faced"][x]
else:
two_inn+=data_score["Runs_Scored"][x]
if(data_score["FOW_batting_number"][x]>two_wicket_fall):
two_wicket_fall=data_score["FOW_batting_number"][x]
two_balls+=data_score["Balls_faced"][x]
team_strike.append([id,(one_inn/one_balls)*100,(two_inn/two_balls)*100])
opp_str_rate_list=[]
for i,j in enumerate(data_score["Player_name"]):
temp_play=[]
score=0
lenght=0
for x,player_search in enumerate(data_score["Player_name"]):
if(j==player_search):
#checking
if(j not in temp_play):
temp_play.append(j)
match_id=data_score["Match_id"][x]
inn=data_score["Innings"][x]
for c in team_strike:
if(c[0]==match_id ):
if(inn==2):
score+=c[inn-1]
lenght+=1
elif(inn==1):
score+=c[inn+1]
lenght+=1
opp_str_rate_list.append([temp_play,score/lenght])
return (opp_str_rate_list)
###Output
_____no_output_____
###Markdown
9. Opposite Teams Win/Loss Ratio (OTW/L)
###Code
def oppo_team_wl(data_score,infocard):
all_players=(set(data_score["Player_name"]))
win_data=[]
for i,j in enumerate(infocard["Match_id"]):
team_id=j
one_win=0
if(infocard["Winner"][i]==infocard["Team1"][i]):
one_win=1
elif(infocard["Winner"][i]==infocard["Team2"][i]):
one_win=2
else:
one_win=3
win_data.append([team_id,one_win])
match_win=[]
for i,j in enumerate(all_players):
player=j
win=0
lose=0
tie=0
for x,player_search in enumerate(data_score["Player_name"]):
match_id=data_score["Match_id"][x]
match_inn=data_score["Innings"][x]
if(j==player_search):
for z in win_data:
id_m=z[0]
inn_m=z[1]
if(match_id==id_m):
if(inn_m==3):
tie+=1
elif(inn_m!=match_inn):
win+=1
else:
lose+=1
match_win.append([player,win,lose,tie])
decision=[]
for i,j in enumerate(infocard["Toss_decision"]):
if(infocard["Toss_decision"][i]=='bat'):
decision.append([infocard["Match_id"][i],1])
else:
decision.append([infocard["Match_id"][i],2])
for i in range(0,len(decision)):
if(str(infocard["Winner"][i])=="nan"):
decision[i][1]=3
elif(str(infocard["Toss_winner"][i])!=str(infocard["Winner"][i])):
if(decision[i][1]==1):
decision[i][1]=2
else:
decision[i][1]=1
match_win=[]
for i,j in enumerate(all_players):
player=j
win=0
lose=0
tie=0
for x,player_search in enumerate(data_score["Player_name"]):
match_id=data_score["Match_id"][x]
match_inn=data_score["Innings"][x]
if(j==player_search):
# print(match_id,match_inn,player,player_search)
for z in decision:
id_m=z[0]
inn_m=z[1]
if(match_id==id_m):
# print(match_id,player,inn_m)
if(inn_m==3):
tie+=1
elif(inn_m!=match_inn):
lose+=1
else:
win+=1
# print(player,win,lose,tie)
match_win.append([player,win,lose,tie])
win_los=[]
total_match=[]
for i in match_win:
win_los.append(i[1]+i[2])
total_match.append(i[1]+i[2]+i[3])
op_final_win_lose=[]
for i,j in enumerate(match_win):
if(j[2]==0):
fo1= j[1]/1
else:
fo1= j[1]/j[2]
op_final_win_lose.append(fo1)
return (op_final_win_lose)
###Output
_____no_output_____
###Markdown
10. Co-Bowlers Average (CBA)
###Code
def co_bowler(data_score):
co_bow_average=[]
uniq_player=list((data_score["Player_name"]))
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i!=y):
runs+=data_score["Runs_conceded"][x]
wicket+=data_score["Wickets_taken"][x]
co_bow_average.append([i,runs/wicket])
return (co_bow_average)
###Output
_____no_output_____
###Markdown
11. Co-Bowlers Strike Rate (CBSR)
###Code
def co_bowler_strike_rate(data_score):
co_bow_strike=[]
uniq_player=list((data_score["Player_name"]))
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i!=y):
runs+=data_score["Overs_bowled"][x]
wicket+=data_score["Wickets_taken"][x]
co_bow_strike.append([i,(runs/wicket)*6])
return(co_bow_strike)
###Output
_____no_output_____
###Markdown
12. Co-Bowlers Average (CBA)
###Code
def co_bowler_avg(data_score):
co_bow_average=[]
uniq_player=list((data_score["Player_name"]))
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i!=y):
runs+=data_score["Runs_conceded"][x]
wicket+=data_score["Wickets_taken"][x]
co_bow_average.append([i,runs/wicket])
return (co_bow_average)
###Output
_____no_output_____
###Markdown
13. Co-Bowlers Strike Rate (CBSR)
###Code
def co_bowler_str():
co_bow_strike=[]
uniq_player=list(set(short_bowler["Player_name"]))
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(short_bowler["Player_name"]):
if(i!=y):
runs+=short_bowler["Overs_bowled"][x]
wicket+=short_bowler["Wicket_taken"][x]
co_bow_strike.append([i,(runs/wicket)*6])
return(co_bow_strike)
###Output
_____no_output_____
###Markdown
14. Co-Bowlers Economy (CBE)
###Code
def co_bowler_eco(data_score):
co_bow_economy=[]
uniq_player=list((data_score["Player_name"]))
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i!=y):
runs+=data_score["Overs_bowled"][x]
wicket+=data_score["Runs_conceded"][x]
co_bow_economy.append([i,(wicket/runs)])
return(co_bow_economy)
###Output
_____no_output_____
###Markdown
15. Team Economy (TE)
###Code
def team_eco(data_score):
match_ids=list((data_score["Match_id"]))
team_economy=[]
for i in (match_ids):
id=i
one_inn=0
two_inn=0
one_over_bowled=1
two_over_bowled=1
for x,j in enumerate(data_score["Match_id"]):
if(i==j):
if(data_score["Innings"][x]==1):
one_inn+=data_score["Runs_conceded"][x]
one_over_bowled=data_score["Overs_bowled"][x]
else:
two_inn+=data_score["Runs_conceded"][x]
two_over_bowled=data_score["Overs_bowled"][x]
team_economy.append([id,(one_inn/one_over_bowled),(two_inn/two_over_bowled)])
all_players=list((data_score["Player_name"]))
plt_list=[]
for i,j in enumerate(all_players):
temp_play=[]
score=0
lenght=0
for x,player_search in enumerate(data_score["Player_name"]):
if(j==player_search):
#checking
if(j not in temp_play):
temp_play.append(j)
match_id=data_score["Match_id"][x]
inn=data_score["Innings"][x]
for c in team_economy:
if(c[0]==match_id ):
score+=c[inn]
lenght+=1
plt_list.append([temp_play,score/lenght])
return (plt_list)
###Output
_____no_output_____
###Markdown
16. Opposite Team Economy
###Code
def opp_team_eco(data_score):
team_economy=[]
match_ids=data_score["Match_id"]
for i in (match_ids):
id=i
one_inn=0
two_inn=0
one_over_bowled=1
two_over_bowled=1
for x,j in enumerate(data_score["Match_id"]):
if(i==j):
if(data_score["Innings"][x]==1):
one_inn+=data_score["Runs_conceded"][x]
one_over_bowled=data_score["Overs_bowled"][x]
else:
two_inn+=data_score["Runs_conceded"][x]
two_over_bowled=data_score["Overs_bowled"][x]
team_economy.append([id,(one_inn/one_over_bowled),(two_inn/two_over_bowled)])
opp_plt_list=[]
for i,j in enumerate(data_score["Player_name"]):
temp_play=[]
score=0
lenght=0
for x,player_search in enumerate(data_score["Player_name"]):
if(j==player_search):
#checking
if(j not in temp_play):
temp_play.append(j)
match_id=data_score["Match_id"][x]
inn=data_score["Innings"][x]
for c in team_economy:
if(c[0]==match_id ):
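# use the economy of the other innings, i.e. the opposing side's bowling economy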
if(inn==2):
score+=c[inn-1]
lenght+=1
elif(inn==1):
score+=c[inn+1]
lenght+=1
opp_plt_list.append([temp_play,score/lenght])
return opp_plt_list
###Output
_____no_output_____
###Markdown
17. Weighted Average Of Batsman (WA(B))
###Code
def wei_avg_bats(data_score):
all_players=((data_score["Player_name"]))
data=[]
n=len(data_score['Player_name'])
for i in all_players:
score=0
for j in range(0,n):
if(i==data_score["Player_name"][j]):
score+=data_score["Runs_Scored"][j]
data.append([i,score])
all_players=(set(data_score["Player_name"]))
batt_num=[]
sum=0
n=len(data_score['Player_name'])
for i in all_players:
score=0
for j in range(0,n):
if(i==data_score["Player_name"][j] and data_score["FOW_batting_number"][j] > -1 ):
score+=1
batt_num.append([i,score])
sum=sum+score
average=[]
for i,j in enumerate(data):
average.append([j[0],data[i][1]/batt_num[i][1]])
all_players=((data_score["Player_name"]))
ball_faced=[]
n=len(data_score['Player_name'])
sum_balls=0
for i in all_players:
score=0
for j in range(0,n):
if(i==data_score["Player_name"][j]):
score+=data_score["Balls_faced"][j]
ball_faced.append([i,score])
sum_balls+=score
strike=[]
for i,j in enumerate(data):
strike.append([j[0],(j[1]/ball_faced[i][1])*100])
weighted_average=[]
for i in range(0,len(strike)):
# print(strike[i],ball_faced[i],data[i])
weight=((data[i][1]*33.33)+(ball_faced[i][1]*33.33)+(strike[i][1]*33.33))/100
weighted_average.append([data[i][0],weight])
return (weighted_average)
###Output
_____no_output_____
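###Markdown
For reference, the weighted average computed above uses equal 33.33 weights on total runs, total balls faced, and strike rate (derived directly from the code):

$$WA(B) = \frac{33.33\,(\text{Runs}) + 33.33\,(\text{Balls faced}) + 33.33\,(\text{Strike rate})}{100}, \qquad \text{Strike rate} = 100\cdot\frac{\text{Runs}}{\text{Balls faced}}$$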
###Markdown
18. Weighted Average Of Bowler (WA(Bow))
###Code
def wei_avg_bow(data_score):
co_bow_average=[]
uniq_player=list((data_score["Player_name"]))
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i==y):
runs+=data_score["Runs_conceded"][x]
wicket+=data_score["Wickets_taken"][x]
co_bow_average.append([i,runs/wicket])
co_bow_economy=[]
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i==y):
runs+=data_score["Overs_bowled"][x]
wicket+=data_score["Runs_conceded"][x]
co_bow_economy.append([i,(wicket/runs)])
co_bow_strike=[]
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i==y):
runs+=data_score["Overs_bowled"][x]
wicket+=data_score["Wickets_taken"][x]
co_bow_strike.append([i,(runs/wicket)*6])
co_bow_wicket=[]
uniq_player=list((data_score["Player_name"]))
for i in uniq_player:
runs=0
wicket=0
for x,y in enumerate(data_score["Player_name"]):
if(i==y):
runs+=data_score["Runs_conceded"][x]
wicket+=data_score["Wickets_taken"][x]
co_bow_wicket.append([i,wicket])
weeighted_average_bowler=[]
for i in range(0,len(co_bow_wicket)):
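# equal 25% weights: wickets taken add positively, while bowling average, economy and strike rate are subtracted (lower is better)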
weight=((25*co_bow_wicket[i][1])+(25*-(co_bow_average[i][1]))+(25*-(co_bow_economy[i][1]))+(25*-(co_bow_strike[i][1])))/100
weeighted_average_bowler.append([co_bow_wicket[i][0],weight])
return(weeighted_average_bowler)
###Output
_____no_output_____
###Markdown
19. Performance Evolution Of A Batsman (PE (B))
###Code
def performance_evo_bats():
unique_player=list(set(data_score["Player_name"]))
master=[]
for i in unique_player:
temp=i
match_id=[]
for j in range(0,len(data_score["Player_name"])):
if(data_score["Player_name"][j]==temp):
match_=data_score["Match_id"][j]
for x,y in enumerate(data_info["Match_id"]):
if(match_==y):
match_id.append([match_,int(data_info["Date"][x][6:])])
master.append([i,match_id,0,0])
player_recents=[]
for i in range(0,len(master)):
maxix=master[i][1][0][1]
for j in range(len(master[i][1])):
if((master[i][1][j][1])>maxix):
maxix=(master[i][1][j][1])
master[i].append(["Maximum",maxix])
for i in range(0,len(master)):
max_year=(master[i][-1][1])
threshold=max_year-4
j=0
while(j<len(master[i][1])):
if(master[i][1][j][1]<threshold):
master[i][1].remove(master[i][1][j])
print("yeysyayydfsd")
j=0
j+=1
unique_id=list(set(data_score["Match_id"])) #162
ids=[]
playr=[]
playr_one=[]
playr_two=[]
ones=[]
twos=[]
indi=[]
indi_ballFace=[]
wickets=[]
ini_wickets=[]
for i in unique_id:
ply=[]
player_one=[]
player_two=[]
one_in=0
two_in=0
scoreer=[]
wickt=0
inid_W=[]
ball_face=[]
for x,y in enumerate(data_score["Match_id"]):
idi_score=0
if(y==i):
ply.append(data_score["Player_name"][x])
if(data_score["Innings"][x]==1):
one_in+=data_score["Runs_Scored"][x]
player_one.append(data_score["Player_name"][x])
else:
two_in+=data_score["Runs_Scored"][x]
player_two.append(data_score["Player_name"][x])
scoreer.append(data_score["Runs_Scored"][x])
ball_face.append(data_score["Balls_faced"][x])
if(data_score["Wicket_method"][x]!= ('-1')):
wickt+=1
if(data_score["Wicket_method"][x]!='-1'):
inid_W.append(1)
else:
inid_W.append(0)
ids.append(i)
playr.append(ply)
playr_one.append(player_one)
playr_two.append(player_two)
ones.append(one_in)
twos.append(two_in)
indi.append(scoreer)
wickets.append(wickt)
ini_wickets.append(inid_W)
indi_ballFace.append(ball_face)
new_data=pd.DataFrame({"Id's":ids,"Players_playes":playr,"1 inn player":playr_one,"2 inn player":playr_two,"First inn score":ones,"Second inn score":twos,"Idividual Score":indi,"Individual Ball Face":indi_ballFace,"Wickets":wickets,"Wicket array player":ini_wickets})
for i in range(0,len(master)):
individual_score=0
co_batsman_total=0
wicket=0
avg_inid=0
total_wicket=0
co_batsman_ball=0
co_wickets=0
for j in range(0,len(master[i][1])):
to_search=(master[i][1][j][0])
for k in range(0,len(new_data["Id's"])):
if(to_search==new_data["Id's"][k]):
imp_index=new_data["Players_playes"][k].index(master[i][0])
inn=""
if(imp_index<=10):
inn="First inn score"
wicket+=(new_data["Wickets"][k]-sum(new_data["Wicket array player"][k][:12]))
avg_inid+=new_data["Wicket array player"][k][imp_index]
else:
inn="Second inn score"
wicket+=(new_data["Wickets"][k]-sum(new_data["Wicket array player"][k][12:]))
avg_inid+=new_data["Wicket array player"][k][imp_index]
individual_score+=new_data["Idividual Score"][k][imp_index]
co_batsman_total+=new_data[inn][k]-individual_score
total_wicket+=new_data["Individual Ball Face"][k][imp_index]
co_batsman_ball+=(sum(new_data["Individual Ball Face"][k])-total_wicket)
avg_inid=avg_inid
if(avg_inid<=0):
avg_inid=1
master[i][2]=["Individual",individual_score]
master[i][3]=["Co=batsman",co_batsman_total/wicket]
master[i][4]=["Individual Average",individual_score/avg_inid]
master[i].append(["Ball_face",total_wicket])
master[i].append(["Co-batsman runs",co_batsman_total])
master[i].append(["COo-batsman_ball face",co_batsman_ball])
master[i].append(["Co_wickets",wicket])
print("-------------------------------------------------------------------")
name=[]
ini_scor=[]
ini_avg=[]
co_bats=[]
r_j=[]
co_batsman_run=[]
co_batsman_ball=[]
ball_face=[]
co_wickets=[]
for i in range(0,len(master)):
name.append(master[i][0])
ini_scor.append(master[i][2][1])
co_bats.append(master[i][3][1])
ini_avg.append(master[i][4][1])
ball_face.append(master[i][5][1])
co_batsman_run.append(master[i][6][1])
co_batsman_ball.append(master[i][7][1])
co_wickets.append(master[i][8][1])
r_j.append((master[i][2][1]-master[i][3][1])/(master[i][3][1]))
r_evo_data=pd.DataFrame({"Names":name,"Individual Score":ini_scor,"Individual Average":ini_avg,"Co-batsman avg Score":co_bats,"R_j":r_j,"Individual bal face":ball_face,"CO_batsman run":co_batsman_run,"Co-batsman ball face":co_batsman_ball,"Co-wickets":co_wickets})
print(r_evo_data)
#avg evo
average=[]
for i in range(0,len(r_evo_data["Individual Average"])):
average.append((r_evo_data["Individual Average"][i]-r_evo_data["Co-batsman avg Score"][i])/(r_evo_data["Co-batsman avg Score"][i]))
average=pd.DataFrame({"Average evo":average})
#Strike
factor_1=[] # batsman part
for i in range(0,len(r_evo_data["Individual Score"])):
factor_1.append(r_evo_data["Individual Score"][i]/r_evo_data["Individual bal face"][i])
factor_2=[]
for i in range(0,len(r_evo_data["Individual Score"])):
x=((r_evo_data["CO_batsman run"][i]/r_evo_data["Co-batsman ball face"][i])/r_evo_data["Co-wickets"][i])
if(str(x)=='inf'):
x=0
factor_2.append(x)
strike_final=[]
for i in range(0,len(factor_1)):
strike_final.append(factor_1[i]/factor_2[i])
strike=pd.DataFrame({"Strike Final":strike_final})
final_data=[]
for i in range(0,len(r_evo_data["R_j"])):
final_data.append((r_evo_data["R_j"][i]+average["Average evo"][i]+strike["Strike Final"][i])/4)
return (final_data)
###Output
_____no_output_____
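###Markdown
A hypothetical usage sketch (not part of the original notebook): `performance_evo_bats` reads the global DataFrames `data_score` and `data_info` used inside it, so both are assumed to be loaded already.
###Code
# Assumes `data_score` and `data_info` are already in scope.
pe_b = performance_evo_bats()
print(pe_b[:5]) # one PE(B) value per unique player
###Output
_____no_output_____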
###Markdown
20. Performance Evolution Of A Bowler (PE (Bow))
###Code
def perfo_evo_bow():
unique_player=list(set(data_score["Player_name"]))
master=[]
for i in unique_player:
temp=i
match_id=[]
for j in range(0,len(data_score["Player_name"])):
if(data_score["Player_name"][j]==temp):
match_=data_score["Match_id"][j]
for x,y in enumerate(data_info["Match_id"]):
if(match_==y):
match_id.append([match_,int(data_info["Date"][x][6:])])
master.append([i,match_id,0,0])
player_recents=[]
for i in range(0,len(master)):
maxix=master[i][1][0][1]
for j in range(len(master[i][1])):
if((master[i][1][j][1])>maxix):
maxix=(master[i][1][j][1])
master[i].append(["Maximum",maxix])
for i in range(0,len(master)):
max_year=(master[i][-1][1])
threshold=max_year-4
j=0
while(j<len(master[i][1])):
if(master[i][1][j][1]<threshold):
master[i][1].remove(master[i][1][j])
j=0
j+=1
unique_id=list(set(data_score["Match_id"])) #162
ids=[]
playr=[]
playr_one=[]
playr_two=[]
ones=[]
twos=[]
indi=[]
indi_ballFace=[]
wickets=[]
ini_wickets=[]
runs_conceded=[]
for i in unique_id:
ply=[]
player_one=[]
player_two=[]
one_in=0
two_in=0
scoreer=[]
wickt=0
inid_W=[]
ball_face=[]
runs_conce=[]
for x,y in enumerate(data_score["Match_id"]):
idi_score=0
if(y==i):
ply.append(data_score["Player_name"][x])
if(data_score["Innings"][x]==1):
one_in+=data_score["Wickets_taken"][x]
player_one.append(data_score["Player_name"][x])
else:
two_in+=data_score["Wickets_taken"][x]
player_two.append(data_score["Player_name"][x])
scoreer.append(data_score["Wickets_taken"][x])
ball_face.append(data_score["Overs_bowled"][x])
runs_conce.append(data_score["Runs_conceded"][x])
if(data_score["Wicket_method"][x]!= ('-1')):
wickt+=1
if(data_score["Wicket_method"][x]!='-1'):
inid_W.append(1)
else:
inid_W.append(0)
ids.append(i)
playr.append(ply)
playr_one.append(player_one)
playr_two.append(player_two)
ones.append(one_in)
twos.append(two_in)
indi.append(scoreer)
wickets.append(wickt)
ini_wickets.append(inid_W)
indi_ballFace.append(ball_face)
runs_conceded.append(runs_conce)
new_data=pd.DataFrame({"Id's":ids,"Players_playes":playr,"1 inn player":playr_one,"2 inn player":playr_two,"First inn wicket":ones,"Second inn wicket":twos,"Idividual wicket":indi,"Individual runs conceded bowler":runs_conceded,"Individual Ball Face":indi_ballFace,"Wickets":wickets,"Wicket array player":ini_wickets})
for i in range(0,len(master)):
individual_score=0
co_batsman_total=0
wicket=0
avg_inid=0
total_wicket=0
co_batsman_ball=0
co_wickets=0
eco=0
co_runs_con=0
co_bowled=0
co_eco=0
for j in range(0,len(master[i][1])):
to_search=(master[i][1][j][0])
for k in range(0,len(new_data["Id's"])):
if(to_search==new_data["Id's"][k]):
imp_index=new_data["Players_playes"][k].index(master[i][0])
inn=""
if(imp_index<=10):
inn="First inn wicket"
wicket+=(new_data["Wickets"][k]-sum(new_data["Wicket array player"][k][12:]))
avg_inid+=new_data["Wicket array player"][k][imp_index]
else:
inn="Second inn wicket"
wicket+=(new_data["Wickets"][k]-sum(new_data["Wicket array player"][k][:12]))
avg_inid+=new_data["Wicket array player"][k][imp_index]
individual_score+=new_data["Idividual wicket"][k][imp_index]
co_batsman_total+=new_data[inn][k]-individual_score
total_wicket+=new_data["Individual Ball Face"][k][imp_index]
co_batsman_ball+=(sum(new_data["Individual Ball Face"][k])-total_wicket)
co_runs_con+=sum(new_data["Individual runs conceded bowler"][k])-new_data["Individual runs conceded bowler"][k][imp_index]
co_bowled+=sum(new_data["Individual Ball Face"][k])-new_data["Individual Ball Face"][k][imp_index]
eco+=new_data["Individual runs conceded bowler"][k][imp_index]/new_data["Individual Ball Face"][k][imp_index]
if(str(eco)=='nan'):
eco=1
co_eco+=co_runs_con/co_bowled
avg_inid=avg_inid
if(avg_inid<=0):
avg_inid=1
master[i][2]=["Individual wicket",individual_score]
master[i][3]=["Co=batsman average wicket",co_batsman_total/wicket]
master[i][4]=["Individual Average",individual_score/avg_inid]
master[i].append(["Ball_face",total_wicket])
master[i].append(["Co-batsman runs",co_batsman_total])
master[i].append(["COo-batsman_ball face",co_batsman_ball])
master[i].append(["Co_wickets",wicket])
master[i].append(["Individual Economy",eco])
master[i].append(["Co_bowl Economy",co_eco])
# print("-------------------------------------------------------------------")
name=[]
ini_scor=[]
ini_avg=[]
co_bats=[]
r_j=[]
co_batsman_run=[]
co_batsman_ball=[]
ball_face=[]
co_wickets=[]
r_eco=[]
for i in range(0,len(master)):
name.append(master[i][0])
ini_scor.append(master[i][2][1])
co_bats.append(master[i][3][1])
ini_avg.append(master[i][4][1])
ball_face.append(master[i][5][1])
co_batsman_run.append(master[i][6][1])
co_batsman_ball.append(master[i][7][1])
co_wickets.append(master[i][8][1])
r_j.append((master[i][2][1]-master[i][3][1])/(master[i][3][1]))
r_eco.append((master[i][9][1]-master[i][10][1])/master[i][10][1])
w_evo_data=pd.DataFrame({"Names":name,"Individual Score":ini_scor,"Individual Average":ini_avg,"Co-batsman avg Score":co_bats,"W_j":r_j,"R_e":r_eco,"Individual bal face":ball_face,"CO_batsman run":co_batsman_run,"Co-batsman ball face":co_batsman_ball,"Co-wickets":co_wickets})
average=[]
for i in range(0,len(r_evo_data["Individual Average"])):
average.append((r_evo_data["Individual Average"][i]-r_evo_data["Co-batsman avg Score"][i])/(r_evo_data["Co-batsman avg Score"][i]))
average=pd.DataFrame({"Average evo":average})
factor_1=[]
for i in range(0,len(r_evo_data["Individual Score"])):
factor_1.append(r_evo_data["Individual Score"][i]/r_evo_data["Individual bal face"][i])
factor_2=[]
for i in range(0,len(r_evo_data["Individual Score"])):
x=((r_evo_data["CO_batsman run"][i]/r_evo_data["Co-batsman ball face"][i])/r_evo_data["Co-wickets"][i])
if(str(x)=='inf'):
x=0
factor_2.append(x)
strike_final=[]
for i in range(0,len(factor_1)):
strike_final.append(float(factor_1[i]//factor_2[i]))
strike=pd.DataFrame({"Strike Final":strike_final})
W=pd.DataFrame({"W":w_evo_data["W_j"].tolist()})
Perfo_bowler=[]
for i in range(0,len(W)):
form=W["W"][i]-average["Average evo"][i]-economy["Economy"][i]-strike["Strike Final"][i]
form1=(form/4)
if(str(form1)=='nan'):
form1=0
Perfo_bowler.append(form1)
final_perfo_bowelr=pd.DataFrame({"Performance bowler":Perfo_bowler})
print(final_perfo_bowelr)
###Output
_____no_output_____ |
nlp_second_order_attack_demo.ipynb | ###Markdown
Demo Notebook
This notebook provides the demo for the following paper:
> Chong Zhang, Jieyu Zhao, Huan Zhang, Kai-Wei Chang, and Cho-Jui Hsieh, "*Double Perturbation: On the Robustness of Robustness and Counterfactual Bias Evaluation*", NAACL 2021
The demo first sets up the environment and then performs the second-order attack on a pre-trained LSTM model from TextAttack. Please use the GPU runtime.
Please refer to https://github.com/chong-z/nlp-second-order-attack for more details.
1. Clone the repo
###Code
!git clone --recurse-submodules https://github.com/chong-z/nlp-second-order-attack.git
%cd nlp-second-order-attack
###Output
Cloning into 'nlp-second-order-attack'...
remote: Enumerating objects: 56, done.[K
remote: Counting objects: 100% (56/56), done.[K
remote: Compressing objects: 100% (50/50), done.[K
remote: Total 56 (delta 11), reused 51 (delta 6), pack-reused 0[K
Unpacking objects: 100% (56/56), done.
Submodule 'libs/TextAttack' (https://github.com/chong-z/TextAttack.git) registered for path 'libs/TextAttack'
Submodule 'libs/jia_certified' (https://github.com/chong-z/certified-word-sub.git) registered for path 'libs/jia_certified'
Submodule 'libs/xu_auto_LiRPA' (https://github.com/KaidiXu/auto_LiRPA.git) registered for path 'libs/xu_auto_LiRPA'
Cloning into '/content/nlp-second-order-attack/libs/TextAttack'...
remote: Enumerating objects: 22, done.
remote: Counting objects: 100% (22/22), done.
remote: Compressing objects: 100% (15/15), done.
remote: Total 13659 (delta 7), reused 14 (delta 7), pack-reused 13637
Receiving objects: 100% (13659/13659), 108.47 MiB | 34.39 MiB/s, done.
Resolving deltas: 100% (10087/10087), done.
Cloning into '/content/nlp-second-order-attack/libs/jia_certified'...
remote: Enumerating objects: 55, done.
remote: Counting objects: 100% (55/55), done.
remote: Compressing objects: 100% (43/43), done.
remote: Total 55 (delta 15), reused 47 (delta 9), pack-reused 0
Cloning into '/content/nlp-second-order-attack/libs/xu_auto_LiRPA'...
remote: Enumerating objects: 367, done.
remote: Counting objects: 100% (367/367), done.
remote: Compressing objects: 100% (230/230), done.
remote: Total 367 (delta 197), reused 292 (delta 122), pack-reused 0
Receiving objects: 100% (367/367), 3.97 MiB | 31.75 MiB/s, done.
Resolving deltas: 100% (197/197), done.
Submodule path 'libs/TextAttack': checked out '995f098aca785d9ac37ccbb743ad8d7d0b2ed3c6'
Submodule path 'libs/jia_certified': checked out '54c602dcb29782a65aa5100ca9a1df1d32890c5d'
Submodule path 'libs/xu_auto_LiRPA': checked out 'c8935c6d22cd76e137b1a9b1b3ea67f7d234601d'
/content/nlp-second-order-attack
###Markdown
2. Install required packages
Note: Please run `setup.sh` instead of `quick_setup.sh` if you want to experiment on certified models such as Jia et al., 2019.
###Code
!./quick_setup.sh
###Output
Looking in links: https://download.pytorch.org/whl/torch_stable.html
Collecting torch==1.7.1+cu110
[?25l Downloading https://download.pytorch.org/whl/cu110/torch-1.7.1%2Bcu110-cp37-cp37m-linux_x86_64.whl (1156.8MB)
[K |███████████████████████ | 834.1MB 1.2MB/s eta 0:04:22tcmalloc: large alloc 1147494400 bytes == 0x55ad17d8a000 @ 0x7fa19d385615 0x55acde0c006c 0x55acde19feba 0x55acde0c2e8d 0x55acde1b499d 0x55acde136fe9 0x55acde131b0e 0x55acde0c477a 0x55acde136e50 0x55acde131b0e 0x55acde0c477a 0x55acde13386a 0x55acde1b57c6 0x55acde132ee2 0x55acde1b57c6 0x55acde132ee2 0x55acde1b57c6 0x55acde132ee2 0x55acde1b57c6 0x55acde237431 0x55acde198049 0x55acde102c84 0x55acde0c38e9 0x55acde137ade 0x55acde0c469a 0x55acde132a45 0x55acde131e0d 0x55acde0c477a 0x55acde132a45 0x55acde0c469a 0x55acde132a45
[K |█████████████████████████████▏ | 1055.7MB 1.3MB/s eta 0:01:20tcmalloc: large alloc 1434370048 bytes == 0x55ad5c3e0000 @ 0x7fa19d385615 0x55acde0c006c 0x55acde19feba 0x55acde0c2e8d 0x55acde1b499d 0x55acde136fe9 0x55acde131b0e 0x55acde0c477a 0x55acde136e50 0x55acde131b0e 0x55acde0c477a 0x55acde13386a 0x55acde1b57c6 0x55acde132ee2 0x55acde1b57c6 0x55acde132ee2 0x55acde1b57c6 0x55acde132ee2 0x55acde1b57c6 0x55acde237431 0x55acde198049 0x55acde102c84 0x55acde0c38e9 0x55acde137ade 0x55acde0c469a 0x55acde132a45 0x55acde131e0d 0x55acde0c477a 0x55acde132a45 0x55acde0c469a 0x55acde132a45
[K |████████████████████████████████| 1156.7MB 1.2MB/s eta 0:00:01tcmalloc: large alloc 1445945344 bytes == 0x55adb1bcc000 @ 0x7fa19d385615 0x55acde0c006c 0x55acde19feba 0x55acde0c2e8d 0x55acde1b499d 0x55acde136fe9 0x55acde131b0e 0x55acde0c477a 0x55acde132c9e 0x55acde131b0e 0x55acde0c477a 0x55acde132c9e 0x55acde131b0e 0x55acde0c477a 0x55acde132c9e 0x55acde131b0e 0x55acde0c477a 0x55acde132c9e 0x55acde131b0e 0x55acde0c477a 0x55acde132c9e 0x55acde0c469a 0x55acde132c9e 0x55acde131b0e 0x55acde0c477a 0x55acde13386a 0x55acde131b0e 0x55acde0c477a 0x55acde13386a 0x55acde131b0e 0x55acde0c4e11
[K |████████████████████████████████| 1156.8MB 11kB/s
[?25hRequirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torch==1.7.1+cu110) (1.19.5)
Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch==1.7.1+cu110) (3.7.4.3)
[31mERROR: torchvision 0.9.1+cu101 has requirement torch==1.8.1, but you'll have torch 1.7.1+cu110 which is incompatible.[0m
[31mERROR: torchtext 0.9.1 has requirement torch==1.8.1, but you'll have torch 1.7.1+cu110 which is incompatible.[0m
Installing collected packages: torch
Found existing installation: torch 1.8.1+cu101
Uninstalling torch-1.8.1+cu101:
Successfully uninstalled torch-1.8.1+cu101
Successfully installed torch-1.7.1+cu110
Collecting tensorflow==2.2.0
[?25l Downloading https://files.pythonhosted.org/packages/4c/1a/0d79814736cfecc825ab8094b39648cc9c46af7af1bae839928acb73b4dd/tensorflow-2.2.0-cp37-cp37m-manylinux2010_x86_64.whl (516.2MB)
[K |████████████████████████████████| 516.2MB 33kB/s
[?25hCollecting transformers==3.0.2
[?25l Downloading https://files.pythonhosted.org/packages/27/3c/91ed8f5c4e7ef3227b4119200fc0ed4b4fd965b1f0172021c25701087825/transformers-3.0.2-py3-none-any.whl (769kB)
[K |████████████████████████████████| 778kB 39.3MB/s
[?25hCollecting sentence-transformers==0.3.4
[?25l Downloading https://files.pythonhosted.org/packages/1d/09/36bcda3e1839fee5ba7bd64779ab3824b5f0bbf19ba32d985692c4141ec0/sentence-transformers-0.3.4.tar.gz (61kB)
[K |████████████████████████████████| 61kB 9.5MB/s
[?25hCollecting sentencepiece==0.1.91
[?25l Downloading https://files.pythonhosted.org/packages/f2/e2/813dff3d72df2f49554204e7e5f73a3dc0f0eb1e3958a4cad3ef3fb278b7/sentencepiece-0.1.91-cp37-cp37m-manylinux1_x86_64.whl (1.1MB)
[K |████████████████████████████████| 1.1MB 50.4MB/s
[?25hRequirement already satisfied: scipy==1.4.1 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 5)) (1.4.1)
Collecting scikit-learn==0.24.1
[?25l Downloading https://files.pythonhosted.org/packages/f3/74/eb899f41d55f957e2591cde5528e75871f817d9fb46d4732423ecaca736d/scikit_learn-0.24.1-cp37-cp37m-manylinux2010_x86_64.whl (22.3MB)
[K |████████████████████████████████| 22.3MB 1.4MB/s
[?25hCollecting flair==0.6.1.post1
[?25l Downloading https://files.pythonhosted.org/packages/4a/49/a812ed93088ba9519cbb40eb9f52341694b31cfa126bfddcd9db3761f3ac/flair-0.6.1.post1-py3-none-any.whl (337kB)
[K |████████████████████████████████| 337kB 45.4MB/s
[?25hCollecting pyarrow==0.17.1
[?25l Downloading https://files.pythonhosted.org/packages/14/78/dcd7f290cd018581b5c73f6c87e2b004f1161cdf6f55c7b2c87d78174592/pyarrow-0.17.1-cp37-cp37m-manylinux2014_x86_64.whl (63.8MB)
[K |████████████████████████████████| 63.8MB 44kB/s
[?25hCollecting wandb
[?25l Downloading https://files.pythonhosted.org/packages/47/af/4cfe48fe55046181b992251933cff4ceb3bfd71a42838f5fe683683cd925/wandb-0.10.25-py2.py3-none-any.whl (2.1MB)
[K |████████████████████████████████| 2.1MB 47.7MB/s
[?25hRequirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 11)) (1.1.5)
Collecting bert-score
[?25l Downloading https://files.pythonhosted.org/packages/14/27/ccf86d5dfc19f89bee4449e96ac6e0f7c312f1614de86609c5f6da5c40af/bert_score-0.3.8-py3-none-any.whl (58kB)
[K |████████████████████████████████| 61kB 8.9MB/s
[?25hRequirement already satisfied: datasets in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 13)) (1.5.0)
Collecting visdom
[?25l Downloading https://files.pythonhosted.org/packages/c9/75/e078f5a2e1df7e0d3044749089fc2823e62d029cc027ed8ae5d71fafcbdc/visdom-0.1.8.9.tar.gz (676kB)
[K |████████████████████████████████| 686kB 50.1MB/s
[?25hCollecting tensorboardX
[?25l Downloading https://files.pythonhosted.org/packages/07/84/46421bd3e0e89a92682b1a38b40efc22dafb6d8e3d947e4ceefd4a5fabc7/tensorboardX-2.2-py2.py3-none-any.whl (120kB)
[K |████████████████████████████████| 122kB 52.0MB/s
[?25hRequirement already satisfied: tensorflow_hub in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 16)) (0.11.0)
Requirement already satisfied: nltk in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 17)) (3.2.5)
Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 18)) (1.19.5)
Collecting pymemcache
[?25l Downloading https://files.pythonhosted.org/packages/b8/94/16a3ae7ce435c8abb90439baa6ebd465f7c5d202bc6b84d8fd69f1534e3e/pymemcache-3.4.1-py2.py3-none-any.whl (49kB)
[K |████████████████████████████████| 51kB 8.0MB/s
[?25hCollecting mezmorize
Downloading https://files.pythonhosted.org/packages/f1/73/c3153951bf8956c92e0a481daa804d57f13970457c32a6692ca6723a026f/mezmorize-0.28.2-py2.py3-none-any.whl
Collecting cached_property
Downloading https://files.pythonhosted.org/packages/48/19/f2090f7dad41e225c7f2326e4cfe6fff49e57dedb5b53636c9551f86b069/cached_property-1.5.2-py2.py3-none-any.whl
Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 22)) (2.23.0)
Collecting lru-dict
Downloading https://files.pythonhosted.org/packages/68/ea/997af58d4e6da019ad825a412f93081d9df67e9dda11cfb026a3d7cd0b6c/lru-dict-1.1.7.tar.gz
Collecting python-Levenshtein
[?25l Downloading https://files.pythonhosted.org/packages/2a/dc/97f2b63ef0fa1fd78dcb7195aca577804f6b2b51e712516cc0e902a9a201/python-Levenshtein-0.12.2.tar.gz (50kB)
[K |████████████████████████████████| 51kB 7.7MB/s
[?25hCollecting lemminflect
[?25l Downloading https://files.pythonhosted.org/packages/4b/67/d04ca98b661d4ad52b9b965c9dabb1f1a2c85541d20f8decb9a9df4e4b32/lemminflect-0.2.2-py3-none-any.whl (769kB)
[K |████████████████████████████████| 778kB 47.0MB/s
[?25hCollecting language_tool_python
Downloading https://files.pythonhosted.org/packages/37/26/48b22ad565fd372edec3577218fb817e0e6626bf4e658033197470ad92b3/language_tool_python-2.5.3-py3-none-any.whl
Requirement already satisfied: editdistance in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 29)) (0.5.3)
Requirement already satisfied: filelock in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 30)) (3.0.12)
Collecting terminaltables
Downloading https://files.pythonhosted.org/packages/9b/c4/4a21174f32f8a7e1104798c445dacdc1d4df86f2f26722767034e4de4bff/terminaltables-3.1.0.tar.gz
Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 32)) (4.41.1)
Collecting word2number
Downloading https://files.pythonhosted.org/packages/4a/29/a31940c848521f0725f0df6b25dca8917f13a2025b0e8fcbe5d0457e45e6/word2number-1.1.zip
Collecting num2words
[?25l Downloading https://files.pythonhosted.org/packages/eb/a2/ea800689730732e27711c41beed4b2a129b34974435bdc450377ec407738/num2words-0.5.10-py3-none-any.whl (101kB)
[K |████████████████████████████████| 102kB 12.5MB/s
[?25hRequirement already satisfied: appdirs>=1.4 in /usr/local/lib/python3.7/dist-packages (from -r requirements.txt (line 37)) (1.4.4)
Collecting oslo.concurrency>=4.2
[?25l Downloading https://files.pythonhosted.org/packages/30/2d/d9dd1b17bdbcd8f269c025052677b7bc3b54b6f91c3df6ba7732c4152327/oslo.concurrency-4.4.0-py3-none-any.whl (47kB)
[K |████████████████████████████████| 51kB 7.9MB/s
[?25hCollecting pytest>=5.0
[?25l Downloading https://files.pythonhosted.org/packages/76/4d/9c00146923da9f1cabd1878209d71b1380d537ec331a1a613e8f4b9d7985/pytest-6.2.3-py3-none-any.whl (280kB)
[K |████████████████████████████████| 286kB 58.9MB/s
[?25hCollecting pytorch_pretrained_bert
[?25l Downloading https://files.pythonhosted.org/packages/d7/e0/c08d5553b89973d9a240605b9c12404bcf8227590de62bae27acbcfe076b/pytorch_pretrained_bert-0.6.2-py3-none-any.whl (123kB)
[K |████████████████████████████████| 133kB 59.7MB/s
[?25hRequirement already satisfied: grpcio>=1.8.6 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (1.32.0)
Requirement already satisfied: absl-py>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (0.12.0)
Requirement already satisfied: protobuf>=3.8.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (3.12.4)
Requirement already satisfied: wrapt>=1.11.1 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (1.12.1)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (1.1.0)
Requirement already satisfied: gast==0.3.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (0.3.3)
Collecting tensorboard<2.3.0,>=2.2.0
[?25l Downloading https://files.pythonhosted.org/packages/1d/74/0a6fcb206dcc72a6da9a62dd81784bfdbff5fedb099982861dc2219014fb/tensorboard-2.2.2-py3-none-any.whl (3.0MB)
[K |████████████████████████████████| 3.0MB 54.2MB/s
[?25hRequirement already satisfied: google-pasta>=0.1.8 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (0.2.0)
Requirement already satisfied: keras-preprocessing>=1.1.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (1.1.2)
Collecting tensorflow-estimator<2.3.0,>=2.2.0
[?25l Downloading https://files.pythonhosted.org/packages/a4/f5/926ae53d6a226ec0fda5208e0e581cffed895ccc89e36ba76a8e60895b78/tensorflow_estimator-2.2.0-py2.py3-none-any.whl (454kB)
[K |████████████████████████████████| 460kB 43.7MB/s
[?25hRequirement already satisfied: wheel>=0.26; python_version >= "3" in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (0.36.2)
Requirement already satisfied: h5py<2.11.0,>=2.10.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (2.10.0)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (3.3.0)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (1.15.0)
Requirement already satisfied: astunparse==1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorflow==2.2.0->-r requirements.txt (line 1)) (1.6.3)
Collecting sacremoses
[?25l Downloading https://files.pythonhosted.org/packages/08/cd/342e584ee544d044fb573ae697404ce22ede086c9e87ce5960772084cad0/sacremoses-0.0.44.tar.gz (862kB)
[K |████████████████████████████████| 870kB 50.7MB/s
[?25hRequirement already satisfied: packaging in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.2->-r requirements.txt (line 2)) (20.9)
Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.7/dist-packages (from transformers==3.0.2->-r requirements.txt (line 2)) (2019.12.20)
Collecting tokenizers==0.8.1.rc1
[?25l Downloading https://files.pythonhosted.org/packages/02/59/68c7e3833f535615fb97d33ffcb7b30bbf62bc7477a9c59cd19ad8535d72/tokenizers-0.8.1rc1-cp37-cp37m-manylinux1_x86_64.whl (3.0MB)
[K |████████████████████████████████| 3.0MB 49.6MB/s
[?25hRequirement already satisfied: torch>=1.2.0 in /usr/local/lib/python3.7/dist-packages (from sentence-transformers==0.3.4->-r requirements.txt (line 3)) (1.7.1+cu110)
Collecting threadpoolctl>=2.0.0
Downloading https://files.pythonhosted.org/packages/f7/12/ec3f2e203afa394a149911729357aa48affc59c20e2c1c8297a60f33f133/threadpoolctl-2.1.0-py3-none-any.whl
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn==0.24.1->-r requirements.txt (line 6)) (1.0.1)
Collecting ftfy
[?25l Downloading https://files.pythonhosted.org/packages/78/50/ba5ec9ff8b56e09c0aa8e13d2cc6e24b31bdd23e2bab8f510929bcc4ac48/ftfy-6.0.tar.gz (63kB)
[K |████████████████████████████████| 71kB 11.2MB/s
[?25hRequirement already satisfied: hyperopt>=0.1.1 in /usr/local/lib/python3.7/dist-packages (from flair==0.6.1.post1->-r requirements.txt (line 7)) (0.1.2)
Collecting janome
[?25l Downloading https://files.pythonhosted.org/packages/a8/63/98858cbead27df7536c7e300c169da0999e9704d02220dc6700b804eeff0/Janome-0.4.1-py2.py3-none-any.whl (19.7MB)
[K |████████████████████████████████| 19.7MB 9.1MB/s
[?25hCollecting sqlitedict>=1.6.0
Downloading https://files.pythonhosted.org/packages/5c/2d/b1d99e9ad157dd7de9cd0d36a8a5876b13b55e4b75f7498bc96035fb4e96/sqlitedict-1.7.0.tar.gz
Collecting konoha<5.0.0,>=4.0.0
Downloading https://files.pythonhosted.org/packages/02/be/4dd30d56a0a19619deb9bf41ba8202709fa83b1b301b876572cd6dc38117/konoha-4.6.4-py3-none-any.whl
Requirement already satisfied: matplotlib>=2.2.3 in /usr/local/lib/python3.7/dist-packages (from flair==0.6.1.post1->-r requirements.txt (line 7)) (3.2.2)
Requirement already satisfied: lxml in /usr/local/lib/python3.7/dist-packages (from flair==0.6.1.post1->-r requirements.txt (line 7)) (4.2.6)
Requirement already satisfied: gensim>=3.4.0 in /usr/local/lib/python3.7/dist-packages (from flair==0.6.1.post1->-r requirements.txt (line 7)) (3.6.0)
Collecting bpemb>=0.3.2
Downloading https://files.pythonhosted.org/packages/91/77/3f0f53856e86af32b1d3c86652815277f7b5f880002584eb30db115b6df5/bpemb-0.3.2-py3-none-any.whl
Collecting mpld3==0.3
[?25l Downloading https://files.pythonhosted.org/packages/91/95/a52d3a83d0a29ba0d6898f6727e9858fe7a43f6c2ce81a5fe7e05f0f4912/mpld3-0.3.tar.gz (788kB)
[K |████████████████████████████████| 798kB 50.3MB/s
[?25hCollecting segtok>=1.5.7
Downloading https://files.pythonhosted.org/packages/41/08/582dab5f4b1d5ca23bc6927b4bb977c8ff7f3a87a3b98844ef833e2f5623/segtok-1.5.10.tar.gz
Collecting deprecated>=1.2.4
Downloading https://files.pythonhosted.org/packages/fb/73/994edfcba74443146c84b91921fcc269374354118d4f452fb0c54c1cbb12/Deprecated-1.2.12-py2.py3-none-any.whl
Collecting langdetect
[?25l Downloading https://files.pythonhosted.org/packages/56/a3/8407c1e62d5980188b4acc45ef3d94b933d14a2ebc9ef3505f22cf772570/langdetect-1.0.8.tar.gz (981kB)
[K |████████████████████████████████| 983kB 30.1MB/s
[?25hRequirement already satisfied: tabulate in /usr/local/lib/python3.7/dist-packages (from flair==0.6.1.post1->-r requirements.txt (line 7)) (0.8.9)
Requirement already satisfied: python-dateutil>=2.6.1 in /usr/local/lib/python3.7/dist-packages (from flair==0.6.1.post1->-r requirements.txt (line 7)) (2.8.1)
Requirement already satisfied: gdown in /usr/local/lib/python3.7/dist-packages (from flair==0.6.1.post1->-r requirements.txt (line 7)) (3.6.4)
Requirement already satisfied: psutil>=5.0.0 in /usr/local/lib/python3.7/dist-packages (from wandb->-r requirements.txt (line 10)) (5.4.8)
Collecting subprocess32>=3.5.3
[?25l Downloading https://files.pythonhosted.org/packages/32/c8/564be4d12629b912ea431f1a50eb8b3b9d00f1a0b1ceff17f266be190007/subprocess32-3.5.4.tar.gz (97kB)
[K |████████████████████████████████| 102kB 13.6MB/s
[?25hRequirement already satisfied: promise<3,>=2.0 in /usr/local/lib/python3.7/dist-packages (from wandb->-r requirements.txt (line 10)) (2.3)
Collecting configparser>=3.8.1
Downloading https://files.pythonhosted.org/packages/fd/01/ff260a18caaf4457eb028c96eeb405c4a230ca06c8ec9c1379f813caa52e/configparser-5.0.2-py3-none-any.whl
Collecting docker-pycreds>=0.4.0
Downloading https://files.pythonhosted.org/packages/f5/e8/f6bd1eee09314e7e6dee49cbe2c5e22314ccdb38db16c9fc72d2fa80d054/docker_pycreds-0.4.0-py2.py3-none-any.whl
Requirement already satisfied: Click>=7.0 in /usr/local/lib/python3.7/dist-packages (from wandb->-r requirements.txt (line 10)) (7.1.2)
Collecting pathtools
Downloading https://files.pythonhosted.org/packages/e7/7f/470d6fcdf23f9f3518f6b0b76be9df16dcc8630ad409947f8be2eb0ed13a/pathtools-0.1.2.tar.gz
Collecting GitPython>=1.0.0
[?25l Downloading https://files.pythonhosted.org/packages/a6/99/98019716955ba243657daedd1de8f3a88ca1f5b75057c38e959db22fb87b/GitPython-3.1.14-py3-none-any.whl (159kB)
[K |████████████████████████████████| 163kB 55.7MB/s
[?25hCollecting shortuuid>=0.5.0
Downloading https://files.pythonhosted.org/packages/25/a6/2ecc1daa6a304e7f1b216f0896b26156b78e7c38e1211e9b798b4716c53d/shortuuid-1.0.1-py3-none-any.whl
Collecting sentry-sdk>=0.4.0
[?25l Downloading https://files.pythonhosted.org/packages/f3/92/5a33be64990ba815364a8f2dd9e6f51de60d23dfddafb4f1fc5577d4dc64/sentry_sdk-1.0.0-py2.py3-none-any.whl (131kB)
[K |████████████████████████████████| 133kB 49.4MB/s
[?25hRequirement already satisfied: PyYAML in /usr/local/lib/python3.7/dist-packages (from wandb->-r requirements.txt (line 10)) (3.13)
Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->-r requirements.txt (line 11)) (2018.9)
Requirement already satisfied: importlib-metadata; python_version < "3.8" in /usr/local/lib/python3.7/dist-packages (from datasets->-r requirements.txt (line 13)) (3.8.1)
Requirement already satisfied: huggingface-hub<0.1.0 in /usr/local/lib/python3.7/dist-packages (from datasets->-r requirements.txt (line 13)) (0.0.8)
Requirement already satisfied: dill in /usr/local/lib/python3.7/dist-packages (from datasets->-r requirements.txt (line 13)) (0.3.3)
Requirement already satisfied: xxhash in /usr/local/lib/python3.7/dist-packages (from datasets->-r requirements.txt (line 13)) (2.0.0)
Requirement already satisfied: multiprocess in /usr/local/lib/python3.7/dist-packages (from datasets->-r requirements.txt (line 13)) (0.70.11.1)
Requirement already satisfied: fsspec in /usr/local/lib/python3.7/dist-packages (from datasets->-r requirements.txt (line 13)) (0.9.0)
Requirement already satisfied: tornado in /usr/local/lib/python3.7/dist-packages (from visdom->-r requirements.txt (line 14)) (5.1.1)
Requirement already satisfied: pyzmq in /usr/local/lib/python3.7/dist-packages (from visdom->-r requirements.txt (line 14)) (22.0.3)
Collecting jsonpatch
Downloading https://files.pythonhosted.org/packages/a3/55/f7c93bae36d869292aedfbcbae8b091386194874f16390d680136edd2b28/jsonpatch-1.32-py2.py3-none-any.whl
Collecting torchfile
Downloading https://files.pythonhosted.org/packages/91/af/5b305f86f2d218091af657ddb53f984ecbd9518ca9fe8ef4103a007252c9/torchfile-0.1.0.tar.gz
Collecting websocket-client
[?25l Downloading https://files.pythonhosted.org/packages/08/33/80e0d4f60e84a1ddd9a03f340be1065a2a363c47ce65c4bd3bae65ce9631/websocket_client-0.58.0-py2.py3-none-any.whl (61kB)
[K |████████████████████████████████| 61kB 9.4MB/s
[?25hRequirement already satisfied: pillow in /usr/local/lib/python3.7/dist-packages (from visdom->-r requirements.txt (line 14)) (7.1.2)
Requirement already satisfied: werkzeug<=2.0.0,>=0.15.0 in /usr/local/lib/python3.7/dist-packages (from mezmorize->-r requirements.txt (line 20)) (1.0.1)
Collecting cachelib<=0.2,>=0.1
Downloading https://files.pythonhosted.org/packages/e6/fc/9c5571cf72ac3ea64ad5cd9d704c1000452cb483a6a3233357d8f3da6991/cachelib-0.1.1-py3-none-any.whl
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 22)) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 22)) (2.10)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 22)) (1.24.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->-r requirements.txt (line 22)) (2020.12.5)
Requirement already satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from python-Levenshtein->-r requirements.txt (line 24)) (54.2.0)
Requirement already satisfied: docopt>=0.6.2 in /usr/local/lib/python3.7/dist-packages (from num2words->-r requirements.txt (line 34)) (0.6.2)
Collecting fasteners>=0.7.0
Downloading https://files.pythonhosted.org/packages/78/20/c862d765287e9e8b29f826749ebae8775bdca50b2cb2ca079346d5fbfd76/fasteners-0.16-py2.py3-none-any.whl
Collecting pbr!=2.1.0,>=2.0.0
[?25l Downloading https://files.pythonhosted.org/packages/fb/48/69046506f6ac61c1eaa9a0d42d22d54673b69e176d30ca98e3f61513e980/pbr-5.5.1-py2.py3-none-any.whl (106kB)
[K |████████████████████████████████| 112kB 42.5MB/s
[?25hCollecting oslo.config>=5.2.0
[?25l Downloading https://files.pythonhosted.org/packages/05/91/4dd50389dea8b9c76812f6f89c20bc35b48818c68a7ce2174ab9fd78bdbe/oslo.config-8.5.0-py3-none-any.whl (127kB)
[K |████████████████████████████████| 133kB 57.2MB/s
[?25hCollecting oslo.utils>=3.33.0
[?25l Downloading https://files.pythonhosted.org/packages/cc/ba/77f27f4b2fecbadbe40c3e367110b781afef85a3b5b576450040dfd1a1d1/oslo.utils-4.8.0-py3-none-any.whl (102kB)
[K |████████████████████████████████| 102kB 13.5MB/s
[?25hCollecting oslo.i18n>=3.15.3
[?25l Downloading https://files.pythonhosted.org/packages/89/ac/b71a66e54c8fcf22c4205efe2b5f94dbf282c194f9f07dbf0a1ac52d4633/oslo.i18n-5.0.1-py3-none-any.whl (42kB)
[K |████████████████████████████████| 51kB 8.2MB/s
[?25hRequirement already satisfied: attrs>=19.2.0 in /usr/local/lib/python3.7/dist-packages (from pytest>=5.0->-r requirements.txt (line 39)) (20.3.0)
Collecting pluggy<1.0.0a1,>=0.12
Downloading https://files.pythonhosted.org/packages/a0/28/85c7aa31b80d150b772fbe4a229487bc6644da9ccb7e427dd8cc60cb8a62/pluggy-0.13.1-py2.py3-none-any.whl
Requirement already satisfied: toml in /usr/local/lib/python3.7/dist-packages (from pytest>=5.0->-r requirements.txt (line 39)) (0.10.2)
Requirement already satisfied: iniconfig in /usr/local/lib/python3.7/dist-packages (from pytest>=5.0->-r requirements.txt (line 39)) (1.1.1)
Requirement already satisfied: py>=1.8.2 in /usr/local/lib/python3.7/dist-packages (from pytest>=5.0->-r requirements.txt (line 39)) (1.10.0)
Collecting boto3
[?25l Downloading https://files.pythonhosted.org/packages/fc/79/64c0815cbe8c6abd7fe5525ec37a2689d3cf10e387629ba4a6e44daff6d0/boto3-1.17.49-py2.py3-none-any.whl (131kB)
[K |████████████████████████████████| 133kB 51.4MB/s
[?25hRequirement already satisfied: google-auth<2,>=1.6.3 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (1.28.0)
Requirement already satisfied: google-auth-oauthlib<0.5,>=0.4.1 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (0.4.3)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (3.3.4)
Requirement already satisfied: tensorboard-plugin-wit>=1.6.0 in /usr/local/lib/python3.7/dist-packages (from tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (1.8.0)
Requirement already satisfied: pyparsing>=2.0.2 in /usr/local/lib/python3.7/dist-packages (from packaging->transformers==3.0.2->-r requirements.txt (line 2)) (2.4.7)
Requirement already satisfied: typing-extensions in /usr/local/lib/python3.7/dist-packages (from torch>=1.2.0->sentence-transformers==0.3.4->-r requirements.txt (line 3)) (3.7.4.3)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.7/dist-packages (from ftfy->flair==0.6.1.post1->-r requirements.txt (line 7)) (0.2.5)
Requirement already satisfied: pymongo in /usr/local/lib/python3.7/dist-packages (from hyperopt>=0.1.1->flair==0.6.1.post1->-r requirements.txt (line 7)) (3.11.3)
Requirement already satisfied: future in /usr/local/lib/python3.7/dist-packages (from hyperopt>=0.1.1->flair==0.6.1.post1->-r requirements.txt (line 7)) (0.16.0)
Requirement already satisfied: networkx in /usr/local/lib/python3.7/dist-packages (from hyperopt>=0.1.1->flair==0.6.1.post1->-r requirements.txt (line 7)) (2.5)
Collecting overrides<4.0.0,>=3.0.0
Downloading https://files.pythonhosted.org/packages/ff/b1/10f69c00947518e6676bbd43e739733048de64b8dd998e9c2d5a71f44c5d/overrides-3.1.0.tar.gz
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=2.2.3->flair==0.6.1.post1->-r requirements.txt (line 7)) (0.10.0)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.7/dist-packages (from matplotlib>=2.2.3->flair==0.6.1.post1->-r requirements.txt (line 7)) (1.3.1)
Requirement already satisfied: smart-open>=1.2.1 in /usr/local/lib/python3.7/dist-packages (from gensim>=3.4.0->flair==0.6.1.post1->-r requirements.txt (line 7)) (4.2.0)
Collecting gitdb<5,>=4.0.1
[?25l Downloading https://files.pythonhosted.org/packages/ea/e8/f414d1a4f0bbc668ed441f74f44c116d9816833a48bf81d22b697090dba8/gitdb-4.0.7-py3-none-any.whl (63kB)
[K |████████████████████████████████| 71kB 10.4MB/s
[?25hRequirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata; python_version < "3.8"->datasets->-r requirements.txt (line 13)) (3.4.1)
Collecting jsonpointer>=1.9
Downloading https://files.pythonhosted.org/packages/23/52/05f67532aa922e494c351344e0d9624a01f74f5dd8402fe0d1b563a6e6fc/jsonpointer-2.1-py2.py3-none-any.whl
Collecting netaddr>=0.7.18
[?25l Downloading https://files.pythonhosted.org/packages/ff/cd/9cdfea8fc45c56680b798db6a55fa60a22e2d3d3ccf54fc729d083b50ce4/netaddr-0.8.0-py2.py3-none-any.whl (1.9MB)
[K |████████████████████████████████| 1.9MB 47.4MB/s
[?25hCollecting debtcollector>=1.2.0
Downloading https://files.pythonhosted.org/packages/8e/50/07a7ccf4dbbe90b58e96f97b747ff98aef9d8c841d2616c48cc05b07db33/debtcollector-2.2.0-py3-none-any.whl
Collecting rfc3986>=1.2.0
Downloading https://files.pythonhosted.org/packages/78/be/7b8b99fd74ff5684225f50dd0e865393d2265656ef3b4ba9eaaaffe622b8/rfc3986-1.4.0-py2.py3-none-any.whl
Collecting stevedore>=1.20.0
[?25l Downloading https://files.pythonhosted.org/packages/d4/49/b602307aeac3df3384ff1fcd05da9c0376c622a6c48bb5325f28ab165b57/stevedore-3.3.0-py3-none-any.whl (49kB)
[K |████████████████████████████████| 51kB 8.3MB/s
[?25hCollecting iso8601>=0.1.11
Downloading https://files.pythonhosted.org/packages/c5/10/da48dc228b821a64407c2527e1e8ee98917b36e80a181f2ca06ea3cb676b/iso8601-0.1.14-py2.py3-none-any.whl
Collecting netifaces>=0.10.4
Downloading https://files.pythonhosted.org/packages/0d/18/fd6e9c71a35b67a73160ec80a49da63d1eed2d2055054cc2995714949132/netifaces-0.10.9.tar.gz
Collecting jmespath<1.0.0,>=0.7.1
Downloading https://files.pythonhosted.org/packages/07/cb/5f001272b6faeb23c1c9e0acc04d48eaaf5c862c17709d20e3469c6e0139/jmespath-0.10.0-py2.py3-none-any.whl
Collecting s3transfer<0.4.0,>=0.3.0
[?25l Downloading https://files.pythonhosted.org/packages/98/14/0b4be62b65c52d6d1c442f24e02d2a9889a73d3c352002e14c70f84a679f/s3transfer-0.3.6-py2.py3-none-any.whl (73kB)
[K |████████████████████████████████| 81kB 11.5MB/s
[?25hCollecting botocore<1.21.0,>=1.20.49
[?25l Downloading https://files.pythonhosted.org/packages/68/59/6e28ce58206039ad2592992b75ee79a8f9dbc902a9704373ddacc4f96300/botocore-1.20.49-py2.py3-none-any.whl (7.4MB)
[K |████████████████████████████████| 7.4MB 48.9MB/s
[?25hRequirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (0.2.8)
Requirement already satisfied: rsa<5,>=3.1.4; python_version >= "3.6" in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (4.7.2)
Requirement already satisfied: cachetools<5.0,>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (4.2.1)
Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.7/dist-packages (from google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (1.3.0)
Requirement already satisfied: decorator>=4.3.0 in /usr/local/lib/python3.7/dist-packages (from networkx->hyperopt>=0.1.1->flair==0.6.1.post1->-r requirements.txt (line 7)) (4.4.2)
Collecting smmap<5,>=3.0.1
Downloading https://files.pythonhosted.org/packages/68/ee/d540eb5e5996eb81c26ceffac6ee49041d473bc5125f2aa995cf51ec1cf1/smmap-4.0.0-py2.py3-none-any.whl
Requirement already satisfied: pyasn1<0.5.0,>=0.4.6 in /usr/local/lib/python3.7/dist-packages (from pyasn1-modules>=0.2.1->google-auth<2,>=1.6.3->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (0.4.8)
Requirement already satisfied: oauthlib>=3.0.0 in /usr/local/lib/python3.7/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<0.5,>=0.4.1->tensorboard<2.3.0,>=2.2.0->tensorflow==2.2.0->-r requirements.txt (line 1)) (3.1.0)
Building wheels for collected packages: sentence-transformers, visdom, lru-dict, python-Levenshtein, terminaltables, word2number, sacremoses, ftfy, sqlitedict, mpld3, segtok, langdetect, subprocess32, pathtools, torchfile, overrides, netifaces
Building wheel for sentence-transformers (setup.py) ... [?25l[?25hdone
Created wheel for sentence-transformers: filename=sentence_transformers-0.3.4-cp37-none-any.whl size=99829 sha256=facdbf6e5f8a1ad52ec2d883b95a1d5cb1471dc474bebddeaa361ddba7106a5a
Stored in directory: /root/.cache/pip/wheels/39/b3/0a/c25bcdeeb0858f691d377f06d4bbf5e735598fa3a54d01c04f
Building wheel for visdom (setup.py) ... [?25l[?25hdone
Created wheel for visdom: filename=visdom-0.1.8.9-cp37-none-any.whl size=655251 sha256=585e99f2de9e9b44d450b38f2839b15b1b46ff291573f0e809dd2918bd46db2a
Stored in directory: /root/.cache/pip/wheels/70/19/a7/6d589ed967f4dfefd33bc166d081257bd4ed0cb618dccfd62a
Building wheel for lru-dict (setup.py) ... [?25l[?25hdone
Created wheel for lru-dict: filename=lru_dict-1.1.7-cp37-cp37m-linux_x86_64.whl size=28380 sha256=8512f2dbafd0f6931ce90bf7b36fd04be6da8d4f5604ebddc049d3cba2a4693c
Stored in directory: /root/.cache/pip/wheels/ae/51/23/0a416781dead9225c7d66d25b9f223c7e32304e99a0b01d566
Building wheel for python-Levenshtein (setup.py) ... [?25l[?25hdone
Created wheel for python-Levenshtein: filename=python_Levenshtein-0.12.2-cp37-cp37m-linux_x86_64.whl size=149807 sha256=164aea6d35c0695a3bba5d04d68d3bbcb61c29a83e4fd09438cdded677c9fad4
Stored in directory: /root/.cache/pip/wheels/b3/26/73/4b48503bac73f01cf18e52cd250947049a7f339e940c5df8fc
Building wheel for terminaltables (setup.py) ... [?25l[?25hdone
Created wheel for terminaltables: filename=terminaltables-3.1.0-cp37-none-any.whl size=15356 sha256=4a678f319d0248bcfe387d115f64a21050430334f2aa6133ea5189fe0c00b188
Stored in directory: /root/.cache/pip/wheels/30/6b/50/6c75775b681fb36cdfac7f19799888ef9d8813aff9e379663e
Building wheel for word2number (setup.py) ... [?25l[?25hdone
Created wheel for word2number: filename=word2number-1.1-cp37-none-any.whl size=5589 sha256=7ed7c059ac1095ab70ad1cf8907edf30a26ce38dd45e13a21ff4ce92e1c44852
Stored in directory: /root/.cache/pip/wheels/46/2f/53/5f5c1d275492f2fce1cdab9a9bb12d49286dead829a4078e0e
Building wheel for sacremoses (setup.py) ... [?25l[?25hdone
Created wheel for sacremoses: filename=sacremoses-0.0.44-cp37-none-any.whl size=886084 sha256=478135448291701d88fecb8de4622d6d52c1b5fc9eac17f6c3ffdd93509040f9
Stored in directory: /root/.cache/pip/wheels/3e/fb/c0/13ab4d63d537658f448366744654323077c4d90069b6512f3c
Building wheel for ftfy (setup.py) ... [?25l[?25hdone
Created wheel for ftfy: filename=ftfy-6.0-cp37-none-any.whl size=41622 sha256=2e11870432e02026fdb563a776ba77b080cb41f4a7abb248e910ce25bb8db234
Stored in directory: /root/.cache/pip/wheels/22/8b/08/7d1c17849e10371206a262304973b5a9f45e8b9d0a2179f465
Building wheel for sqlitedict (setup.py) ... [?25l[?25hdone
Created wheel for sqlitedict: filename=sqlitedict-1.7.0-cp37-none-any.whl size=14376 sha256=88a082d860346dca4bf746ca08164a42e6dd23c064aab3e24ec5129e14979152
Stored in directory: /root/.cache/pip/wheels/cf/c6/4f/2c64a43f041415eb8b8740bd80e15e92f0d46c5e464d8e4b9b
Building wheel for mpld3 (setup.py) ... [?25l[?25hdone
Created wheel for mpld3: filename=mpld3-0.3-cp37-none-any.whl size=116679 sha256=f177d1c2add7ab7fabf8b24c5484f92b10fe5aea129dd82aaf5d669b42aacb7f
Stored in directory: /root/.cache/pip/wheels/c0/47/fb/8a64f89aecfe0059830479308ad42d62e898a3e3cefdf6ba28
Building wheel for segtok (setup.py) ... [?25l[?25hdone
Created wheel for segtok: filename=segtok-1.5.10-cp37-none-any.whl size=25019 sha256=0133bef9be3f24091c665f810e39f9359cdcb2f1faae38400051f52bc3aa6420
Stored in directory: /root/.cache/pip/wheels/b4/39/f6/9ca1c5cabde964d728023b5751c3a206a5c8cc40252321fb6b
Building wheel for langdetect (setup.py) ... [?25l[?25hdone
Created wheel for langdetect: filename=langdetect-1.0.8-cp37-none-any.whl size=993193 sha256=aa51288e257e9daac3744a1b9efed7f4433ca074823fde52ebfbfd674d5c2a3d
Stored in directory: /root/.cache/pip/wheels/8d/b3/aa/6d99de9f3841d7d3d40a60ea06e6d669e8e5012e6c8b947a57
Building wheel for subprocess32 (setup.py) ... [?25l[?25hdone
Created wheel for subprocess32: filename=subprocess32-3.5.4-cp37-none-any.whl size=6489 sha256=3100d3ef379f47d15305d2d4ecf05102c0e2ce836eb86004eba5d03fb2cf67d2
Stored in directory: /root/.cache/pip/wheels/68/39/1a/5e402bdfdf004af1786c8b853fd92f8c4a04f22aad179654d1
Building wheel for pathtools (setup.py) ... [?25l[?25hdone
Created wheel for pathtools: filename=pathtools-0.1.2-cp37-none-any.whl size=8786 sha256=1393968b279b1cb78c753f8af6b2a00f334f921e920727708be986472399e356
Stored in directory: /root/.cache/pip/wheels/0b/04/79/c3b0c3a0266a3cb4376da31e5bfe8bba0c489246968a68e843
Building wheel for torchfile (setup.py) ... [?25l[?25hdone
Created wheel for torchfile: filename=torchfile-0.1.0-cp37-none-any.whl size=5713 sha256=2dcd3348c63322d7e7672fab118f9dc0c42cc886f532809feb985d86eb0ce51f
Stored in directory: /root/.cache/pip/wheels/b1/c3/d6/9a1cc8f3a99a0fc1124cae20153f36af59a6e683daca0a0814
Building wheel for overrides (setup.py) ... [?25l[?25hdone
Created wheel for overrides: filename=overrides-3.1.0-cp37-none-any.whl size=10174 sha256=c95d11fdd18c41888216ab91ce42eef877ad39e5b781210cfac880270738fd90
Stored in directory: /root/.cache/pip/wheels/5c/24/13/6ef8600e6f147c95e595f1289a86a3cc82ed65df57582c65a9
Building wheel for netifaces (setup.py) ... [?25l[?25hdone
Created wheel for netifaces: filename=netifaces-0.10.9-cp37-cp37m-linux_x86_64.whl size=37423 sha256=2d54ef03a1ce94cd672662832eb64051424645257818e8d964fd28e13a4791f1
Stored in directory: /root/.cache/pip/wheels/23/8f/f3/7054578f04c904f70757c5c85a6e2823baa69d42365526e93d
Successfully built sentence-transformers visdom lru-dict python-Levenshtein terminaltables word2number sacremoses ftfy sqlitedict mpld3 segtok langdetect subprocess32 pathtools torchfile overrides netifaces
[31mERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.[0m
[31mERROR: konoha 4.6.4 has requirement requests<3.0.0,>=2.25.1, but you'll have requests 2.23.0 which is incompatible.[0m
[31mERROR: oslo-config 8.5.0 has requirement PyYAML>=5.1, but you'll have pyyaml 3.13 which is incompatible.[0m
[31mERROR: botocore 1.20.49 has requirement urllib3<1.27,>=1.25.4, but you'll have urllib3 1.24.3 which is incompatible.[0m
Installing collected packages: tensorboard, tensorflow-estimator, tensorflow, sacremoses, sentencepiece, tokenizers, transformers, threadpoolctl, scikit-learn, sentence-transformers, ftfy, janome, sqlitedict, overrides, konoha, bpemb, mpld3, segtok, deprecated, langdetect, flair, pyarrow, subprocess32, configparser, docker-pycreds, pathtools, smmap, gitdb, GitPython, shortuuid, sentry-sdk, wandb, bert-score, jsonpointer, jsonpatch, torchfile, websocket-client, visdom, tensorboardX, pymemcache, cachelib, mezmorize, cached-property, lru-dict, python-Levenshtein, lemminflect, language-tool-python, terminaltables, word2number, num2words, fasteners, pbr, netaddr, debtcollector, rfc3986, stevedore, oslo.i18n, oslo.config, iso8601, netifaces, oslo.utils, oslo.concurrency, pluggy, pytest, jmespath, botocore, s3transfer, boto3, pytorch-pretrained-bert
Found existing installation: tensorboard 2.4.1
Uninstalling tensorboard-2.4.1:
Successfully uninstalled tensorboard-2.4.1
Found existing installation: tensorflow-estimator 2.4.0
Uninstalling tensorflow-estimator-2.4.0:
Successfully uninstalled tensorflow-estimator-2.4.0
Found existing installation: tensorflow 2.4.1
Uninstalling tensorflow-2.4.1:
Successfully uninstalled tensorflow-2.4.1
Found existing installation: scikit-learn 0.22.2.post1
Uninstalling scikit-learn-0.22.2.post1:
Successfully uninstalled scikit-learn-0.22.2.post1
Found existing installation: pyarrow 3.0.0
Uninstalling pyarrow-3.0.0:
Successfully uninstalled pyarrow-3.0.0
Found existing installation: pluggy 0.7.1
Uninstalling pluggy-0.7.1:
Successfully uninstalled pluggy-0.7.1
Found existing installation: pytest 3.6.4
Uninstalling pytest-3.6.4:
Successfully uninstalled pytest-3.6.4
Successfully installed GitPython-3.1.14 bert-score-0.3.8 boto3-1.17.49 botocore-1.20.49 bpemb-0.3.2 cached-property-1.5.2 cachelib-0.1.1 configparser-5.0.2 debtcollector-2.2.0 deprecated-1.2.12 docker-pycreds-0.4.0 fasteners-0.16 flair-0.6.1.post1 ftfy-6.0 gitdb-4.0.7 iso8601-0.1.14 janome-0.4.1 jmespath-0.10.0 jsonpatch-1.32 jsonpointer-2.1 konoha-4.6.4 langdetect-1.0.8 language-tool-python-2.5.3 lemminflect-0.2.2 lru-dict-1.1.7 mezmorize-0.28.2 mpld3-0.3 netaddr-0.8.0 netifaces-0.10.9 num2words-0.5.10 oslo.concurrency-4.4.0 oslo.config-8.5.0 oslo.i18n-5.0.1 oslo.utils-4.8.0 overrides-3.1.0 pathtools-0.1.2 pbr-5.5.1 pluggy-0.13.1 pyarrow-0.17.1 pymemcache-3.4.1 pytest-6.2.3 python-Levenshtein-0.12.2 pytorch-pretrained-bert-0.6.2 rfc3986-1.4.0 s3transfer-0.3.6 sacremoses-0.0.44 scikit-learn-0.24.1 segtok-1.5.10 sentence-transformers-0.3.4 sentencepiece-0.1.91 sentry-sdk-1.0.0 shortuuid-1.0.1 smmap-4.0.0 sqlitedict-1.7.0 stevedore-3.3.0 subprocess32-3.5.4 tensorboard-2.2.2 tensorboardX-2.2 tensorflow-2.2.0 tensorflow-estimator-2.2.0 terminaltables-3.1.0 threadpoolctl-2.1.0 tokenizers-0.8.1rc1 torchfile-0.1.0 transformers-3.0.2 visdom-0.1.8.9 wandb-0.10.25 websocket-client-0.58.0 word2number-1.1
Collecting lm-scorer==0.4.2
Downloading https://files.pythonhosted.org/packages/c8/89/d86ee877bfa51104b338a67413c76b6fde50a76c7b7e0c55c546effe97e9/lm_scorer-0.4.2-py3-none-any.whl
Installing collected packages: lm-scorer
Successfully installed lm-scorer-0.4.2
###Markdown
3. Perform the Second-Order Attack
Attack a pre-trained model `lstm-sst2` in the [TextAttack Model Zoo](https://github.com/chong-z/TextAttack/blob/d6ebeeb1afae215d7de5f04c3aac743bbeaf54db/textattack/models/README.md):
###Code
!./patched_textattack attack --attack-from-file=biasattack.py:SOBeamAttack \
--dataset-from-nlp=glue:sst2:validation --num-examples=10 --shuffle=False \
--model=lstm-sst2
###Output
_____no_output_____ |
Code_Conf/AutoSklearn.ipynb | ###Markdown
###Code
!pip3 install auto-sklearn
# auto-sklearn for classification dataset
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
from autosklearn.classification import AutoSklearnClassifier
# load dataset
url = 'https://raw.githubusercontent.com/kdemertzis/Earthquakes/main/test_1_3class.csv'
dataframe = read_csv(url, header=None)
# print(dataframe.head())
# split into input and output elements
data = dataframe.values
X, y = data[:, :-1], data[:, -1]
# minimally prepare dataset
X = X.astype('float32')
y = LabelEncoder().fit_transform(y.astype('str'))
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# define search
model = AutoSklearnClassifier(time_left_for_this_task=5*60, per_run_time_limit=30, n_jobs=8)
# perform the search
model.fit(X_train, y_train)
# summarize
print(model.sprint_statistics())
# evaluate best model
y_hat = model.predict(X_test)
acc = accuracy_score(y_test, y_hat)
print("Accuracy: %.3f" % acc)
###Output
_____no_output_____ |
code/rochester_hills_SUITE.ipynb | ###Markdown
Tools for SUITE Risk-Limiting Election Audits
This Jupyter notebook implements some tools to conduct "hybrid" stratified risk-limiting audits as described in Risk-Limiting Audits by Stratified Union-Intersection Tests of Elections (SUITE), by Ottoboni, Stark, Lindeman, and McBurnett.
For an implementation of tools for "comparison" risk-limiting audits as described in AGI, see http://statistics.berkeley.edu/~stark/Vote/auditTools.htm. For the sister ballot polling tool, see https://www.stat.berkeley.edu/~stark/Vote/ballotPollTools.htm.
The tools on this page help perform the following steps:
* Choose a number of ballots to audit in each stratum initially, on the assumption that the contest outcome is correct.
* Select random samples of ballots in each stratum.
* Find those ballots using ballot manifests.
* Determine whether the audit can stop, given the votes on the ballots in the sample.
* If the audit cannot stop yet, estimate how many additional ballots will need to be audited.
This notebook is already filled out with an example election. It can be run from start to finish to demonstrate how the tool works. The numbers in the example can be deleted and replaced with actual data for an audit.
Introduction to Jupyter Notebooks
We leave [a comprehensive introduction to the Jupyter notebook](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html) to the experts, but below are a few features you should know to use this tool:
* notebooks are comprised of _cells_, blocks of code that can be run together. To the left of a code cell, you will see either [] (indicating that it has not been run yet) or [x] (where x is a number indicating that it was the xth cell to be run). You can run the code in a cell by clicking into the cell, indicated by a green box around the cell, and pressing `Ctrl + Enter`.
* code lines that begin with `#` are comments. They're not actually run, but are there to describe what the code is doing.
* the text in a notebook is also written in a cell. Instead of a code cell, it's a Markdown cell. Clicking on a text cell will make it editable; running `Ctrl + Enter` will render it back into text.
* the order in which cells are executed matters. Code in later cells depends on earlier cells. However, it is _possible_ to run cells out of order or rerun cells that have been run earlier; this can cause problems. In general, it is __best practice__ to rerun the entire notebook after you have filled in the values you want. To do so, click on the `Kernel` menu at the top of the page and select `Restart & Run All`. This will clear the memory and rerun everything in the prescribed order.
The following cell imports all the necessary functionality from packages.
###Code
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
from IPython.display import display, HTML
from collections import OrderedDict
from itertools import product
import math
import json
import pprint
import numpy as np
from ballot_comparison import ballot_comparison_pvalue
from fishers_combination import maximize_fisher_combined_pvalue, create_modulus
from sprt import ballot_polling_sprt
from cryptorandom.cryptorandom import SHA256
from cryptorandom.sample import random_sample
from suite_tools import write_audit_parameters, write_audit_results, \
check_valid_audit_parameters, check_valid_vote_counts, \
check_overvote_rates, find_winners_losers, print_reported_votes, \
estimate_n, estimate_escalation_n, \
sample_from_manifest, write_ballots_to_sample, \
audit_contest, check_polling_sample_size, plot_nratio_sample_sizes
import warnings
warnings.filterwarnings("ignore")
###Output
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/importlib/_bootstrap.py:321: RuntimeWarning: numpy.dtype size changed, may indicate binary incompatibility. Expected 96, got 88
return f(*args, **kwds)
###Markdown
Input the global audit parameters.
For an audit, you should input the following global parameters in the cell below:
* contest-specific parameters:
  * `risk_limit`: the risk limit for the audit
  * `stratum_sizes`: total ballots in the two strata, [CVR total, no-CVR total]
  * `num_winners`: number of winners in the contest
* software parameters:
  * `seed`: the numeric seed for the pseudo-random number generator used to draw samples of ballots. Use, e.g., 20 rolls of a 10-sided die
  * `gamma`: the gamma parameter used in the ballot-polling method from Lindeman and Stark (2012). Default value of 1.03905 is generally accepted
  * `lambda_step`: the initial step size in the grid search over the way error is allocated across the CVR and no-CVR strata in SUITE. Default 0.05 is acceptable
* initial sample size estimate parameters:
  * `o1_rate`: expected rate of 1-vote overstatements in the CVR stratum
  * `o2_rate`: expected rate of 2-vote overstatements in the CVR stratum
  * `u1_rate`: expected rate of 1-vote understatements in the CVR stratum
  * `u2_rate`: expected rate of 2-vote understatements in the CVR stratum
  * `n_ratio`: what fraction of the sample is taken from the CVR stratum. Default is to allocate sample in proportion to ballots cast in each stratum.
###Code
# contest-specific parameters
risk_limit = 0.05 # risk limit
stratum_sizes = [0, 36666] # total ballots in the two strata, CVR, no-CVR
num_winners = 1 # maximum number of winners, per social choice function
# software parameters
seed = "04889743399761425005" # use, e.g., 20 rolls of a 10-sided die
gamma=1.03905 # gamma from Lindeman and Stark (2012)
lambda_step = 0.05 # stepsize for the discrete bounds on Fisher's combining function
# initial sample size parameters
o1_rate = 0.002 # expect 2 1-vote overstatements per 1000 ballots in the CVR stratum
o2_rate = 0 # expect 0 2-vote overstatements
u1_rate = 0 # expect 0 1-vote understatements
u2_rate = 0 # expect 0 2-vote understatements
n_ratio = stratum_sizes[0]/np.sum(stratum_sizes)
# allocate sample in proportion to ballots cast in each stratum
check_valid_audit_parameters(risk_limit, lambda_step, o1_rate, o2_rate, \
u1_rate, u2_rate, stratum_sizes, n_ratio, num_winners)
###Output
_____no_output_____
###Markdown
The next cell saves the input parameters to a JSON file. You may change the file name in quotes but do not change the rest of the code.
###Code
write_audit_parameters("../log/rochester_hills_audit_parameters.json",\
risk_limit, stratum_sizes, num_winners, seed, gamma, \
lambda_step, o1_rate, o2_rate, \
u1_rate, u2_rate, n_ratio)
###Output
_____no_output_____
###Markdown
Enter the reported votes
Candidates are stored in a data structure called a dictionary. Enter the candidate name and the votes in each stratum, [votes in CVR stratum, votes in no-CVR stratum], in the cell below. The following cell will calculate the vote totals, margins, winners, and losers.
###Code
# input number of winners
# input names as well as reported votes in each stratum
# candidates are a dict with name, [votes in CVR stratum, votes in no-CVR stratum]
candidates = {"Yes": [0, 22999],
"No": [0, 12343]}
# Run validity check on the input vote totals
check_valid_vote_counts(candidates, num_winners, stratum_sizes)
# compute reported winners, losers, and pairwise margins. Nothing should be printed.
(candidates, margins, winners, losers) = find_winners_losers(candidates, num_winners)
# Check that overstatement rates are compatible with the reported results
check_overvote_rates(margins=margins, total_votes=sum(stratum_sizes),
o1_rate=o1_rate, o2_rate=o2_rate)
# print reported winners, losers, and pairwise margins
print_reported_votes(candidates, winners, losers, margins, stratum_sizes,\
print_alphabetical=False)
###Output
Total reported votes:
CVR no-CVR total % of all votes % of valid votes
Yes : 0 22999 22999 62.73% 65.08%
No : 0 12343 12343 33.66% 34.92%
valid votes: 0 35342 35342 96.39%
non-votes: 0 1324 1324 3.61%
Reported winners:
Yes
Reported losers:
No
Reported margins:
Yes beat No by 10,656 votes
Smallest reported margin: 10,656
Corresponding reported diluted margin: 29.06%
###Markdown
Initial sample size estimates.
The initial sample size tool helps you anticipate the number of randomly selected ballots that might need to be inspected to attain a given limit on the risk, under the assumption that the reported percentages for each candidate are correct. It is completely legitimate to sample one at a time and rerun the SUITE calculations, but this form can help auditors anticipate how many ballots the audit is likely to require and to retrieve ballots more efficiently.
This code will estimate the sample size needed to attain the desired risk limit in an audit of the contest between each pair of winning and losing candidates. The overall sample size will be allocated to the CVR stratum in `n_ratio` proportion and to the no-CVR stratum in `1-n_ratio` proportion. The sample size estimates for each pair will be printed below. The expected sample size needed for the audit is the _maximum_ of the sample sizes over all (winner, loser) pairs: the sample must be large enough to confirm the closest margin.
Taking a larger initial sample can avoid needing to expand the sample later, depending on the rate of ballots for each candidate in the sample. Avoiding "escalation" can make the audit less complicated.
###Code
# Calculate expected sample size across (winner, loser) pairs
sample_sizes = {}
for k in product(winners, losers):
sample_sizes[k] = estimate_n(N_w1 = candidates[k[0]][0],\
N_w2 = candidates[k[0]][1],\
N_l1 = candidates[k[1]][0],\
N_l2 = candidates[k[1]][1],\
N1 = stratum_sizes[0],\
N2 = stratum_sizes[1],\
o1_rate = o1_rate,\
o2_rate = o2_rate,\
u1_rate = u1_rate,\
u2_rate = u2_rate,\
n_ratio = n_ratio,\
risk_limit = risk_limit,\
gamma = gamma,\
stepsize = lambda_step,\
min_n = 5,\
risk_limit_tol = 0.8)
sample_size = np.amax([v[0]+v[1] for v in sample_sizes.values()])
print("estimated sample sizes for each contest, written as (cvr stratum, no-cvr stratum):\n")
pprint.pprint(sample_sizes)
print('\n\nexpected total sample size needed to confirm all pairs:', sample_size)
check_polling_sample_size(candidates, winners, losers, stratum_sizes, risk_limit)
# Run this cell to plot the total size as a function of n_ratio
#plot_nratio_sample_sizes(candidates, winners, losers, stratum_sizes, n_ratio_step=0.05, o1_rate=o1_rate)
###Output
_____no_output_____
###Markdown
Random sampling
The next tool helps generate pseudo-random samples of ballots in each stratum. Further below, there is a form to help find the individual, randomly selected ballots among the batches in which ballots are stored.
The first cell below initializes the SHA-256 cryptographically secure pseudo-random number generator. Details on why you might want to use this pseudo-random number generator instead of the Python default can be found in [Stark and Ottoboni (2018)](https://arxiv.org/abs/1810.10985).
Input your desired sample sizes in the second cell below: the number of ballots you want in the sample. The default values that are pre-filled are taken from the initial sample size estimates above.
The third cell should not be modified. It draws the samples from each stratum, using sampling _with_ replacement for the CVR stratum and sampling _without_ replacement for the no-CVR stratum. This means that some ballots in the CVR stratum could be sampled more than once.
**NOTE:** If this section is giving errors, you probably need to upgrade your version of `cryptorandom`:
```pip install --upgrade cryptorandom```
###Code
# initialize the PRNG
prng = SHA256(seed)
# Input the sample sizes for each stratum.
# Defaults to those found using the initial sample size tool above.
n1 = math.ceil(sample_size*n_ratio)
n2 = sample_size-n1
# CVR stratum initial sample size, sampled with replacement
sample1 = prng.randint(1, stratum_sizes[0]+1, size=n1)
# No-CVR ballots are sampled without replacement
sample2 = random_sample(stratum_sizes[1], size=n2, replace=False, prng=prng)
###Output
_____no_output_____
###Markdown
No-CVR sample
###Code
print("No-CVR stratum sample:\n", sample2)
###Output
No-CVR stratum sample:
[20521 14670 10385 6709 30571 28696 34411 23555 25721 24708 33135 33946
9261 28233 8525 36136 12468 17956 16087 3333 11126 19428 21828 30524
29593 28924 12996 31819 26987 2262 21519 24690 10755 22866 31570 6994
7190 20574 33258 15733 11246 14748 25847 514 36634 35260 29245 1434
27437 34375 11774 4711 20302 32028 14980 24095 6813 6253 20172 23478
30791 17510 12366 7654 17574 10667 35937 22936 9667 8187 11599 9120
22926 11536 22535 31959]
###Markdown
Find ballots using ballot manifest
Generally, ballots will be stored in batches, for instance, separated by precinct and mode of voting. To make it easier to find individual ballots, it helps to have a ballot manifest that describes how the ballots are stored.

Batch label | ballots
--- | ---
Polling place precinct 1 | 130
Vote by mail precinct 1 | 172
Polling place precinct 2 | 112
Vote by mail precinct 2 | 201
Polling place precinct 3 | 197
Vote by mail precinct 3 | 188

If ballot 500 is selected for audit, which ballot is that? If we take the listing of batches in the order given by the manifest, and we require that within each batch the ballots are in an order that does not change during the audit, then the 500th ballot is the 86th ballot among the vote by mail ballots for precinct 2: the first three batches have a total of 130+172+112 = 414 ballots, so the first ballot in the fourth batch is ballot 415, and ballot 500 is the 86th ballot in the fourth batch. The ballot look-up tool transforms a list of ballot numbers and a ballot manifest into a list of ballots in each batch.
There must be separate ballot manifests for ballots in the CVR stratum and for ballots in the no-CVR stratum. The manifests should be input as a CSV file with three columns: Batch ID, Scanner ID, and number of ballots. The total number of ballots in the manifest must equal the number cast in the contest that is to be audited using the sample.
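To make the look-up concrete, here is a small illustrative sketch of the arithmetic (this is not part of the audit code — the `sample_from_manifest` helper used in the next cell does this for you); the batch labels and counts are those of the example manifest above:
```python
# Illustrative only: map a 1-indexed ballot number to (batch label, position within batch).
manifest = [("Polling place precinct 1", 130), ("Vote by mail precinct 1", 172),
            ("Polling place precinct 2", 112), ("Vote by mail precinct 2", 201),
            ("Polling place precinct 3", 197), ("Vote by mail precinct 3", 188)]

def locate_ballot(ballot_number, manifest):
    """Return (batch label, position within batch) for a 1-indexed ballot number."""
    ballots_before = 0
    for label, count in manifest:
        if ballot_number <= ballots_before + count:
            return label, ballot_number - ballots_before
        ballots_before += count
    raise ValueError("ballot number exceeds the total number of ballots in the manifest")

print(locate_ballot(500, manifest))  # ('Vote by mail precinct 2', 86)
```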
###Code
nocvr_sample = sample_from_manifest(filename="../data/Rochester Hills Ballot Manifest - combined.csv", \
sample=sample2, \
stratum_size=stratum_sizes[1])
write_ballots_to_sample("../log/Rochester-Hills-sampled-ballots.csv", nocvr_sample)
print("No CVR sample")
display(HTML(
'<table><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in nocvr_sample)
)
))
###Output
No CVR sample
###Markdown
Enter the sample data
The audit cannot stop until **all** the sampled ballots have been examined.
Sample statistics for the CVR stratum (stratum 1)
Enter the number of 1-vote and 2-vote over-/understatements that were observed in the sample using the sliders below, then run the cell beneath the sliders to store the values.
###Code
print("The sample size in the CVR stratum was", n1)
def cvr_audit_inputs(o1, o2, u1, u2):
return (o1, o2, u1, u2)
cvr_stats = interactive(cvr_audit_inputs,
o1 = widgets.IntSlider(min=0,max=n1,value=0),
u1 = widgets.IntSlider(min=0,max=n1,value=0),
o2 = widgets.IntSlider(min=0,max=n1,value=0),
u2 = widgets.IntSlider(min=0,max=n1,value=0))
display(cvr_stats)
(o1, o2, u1, u2) = [cvr_stats.children[i].value for i in range(4)]
###Output
_____no_output_____
###Markdown
Sample statistics for the no-CVR stratum (stratum 2)
Enter the number of ballots for each candidate that were observed in the sample using the sliders below, then run the cell beneath the sliders to store the values.
###Code
print("The sample size in the no-CVR stratum was", n2)
nocvr_widgets=[]
# create the widgets
for name in candidates.keys():
nocvr_widgets.append(widgets.IntSlider(value=0,min=0,max=n2,description=name))
# group the widgets into a FlexBox
nocvr_audit_inputs = widgets.VBox(children=nocvr_widgets)
# display the widgets
display(nocvr_audit_inputs)
# no-CVR sample is stored in a dict with name, votes in the sample
observed_poll = {}
for widget in nocvr_widgets:
observed_poll[widget.description] = widget.value
assert np.sum(list(observed_poll.values())) <= n2, "Too many ballots input"
pprint.pprint(observed_poll)
###Output
{'No': 26, 'Yes': 50}
###Markdown
What's the risk for this sample?
The audit considers every (winner, loser) pair in each contest: auditing continues until there is strong evidence that every winner in the contest got more votes than every loser in the contest. The SUITE risk for every pair will appear beneath the cell below after it is run. The audit continues until none of the numbers is larger than the risk limit. E.g., if the risk limit is 10%, the audit stops when the numbers in the table are all less than 0.1.
###Code
# Find audit p-values across (winner, loser) pairs
audit_pvalues = audit_contest(candidates, winners, losers, stratum_sizes, \
n1, n2, o1, o2, u1, u2, observed_poll, \
risk_limit=risk_limit, gamma=gamma, stepsize=lambda_step)
pprint.pprint(audit_pvalues)
# Track contests not yet confirmed
contests_not_yet_confirmed = [i[0] for i in audit_pvalues.items() \
if i[1]>risk_limit]
print("Pairs not yet confirmed:\n", contests_not_yet_confirmed)
winners_not_yet_confirmed = list(set(list(map(lambda x: x[0], contests_not_yet_confirmed))))
losers_not_yet_confirmed = list(set(list(map(lambda x: x[1], contests_not_yet_confirmed))))
# Save everything to file, you may change the file name in quotes
write_audit_results("../log/Rochester Hills audit_results.json", \
n1, n2, sample1, sample2, \
o1, o2, u1, u2, observed_poll, \
audit_pvalues, prng.getstate())
###Output
_____no_output_____
###Markdown
Escalation guidance: how many more ballots should be drawn?
This tool estimates how many more ballots should be examined to confirm any remaining contests. The enlarged sample size is based on the following:
* ballots that have already been sampled
* assumption that we will continue to see overstatements and understatements at the same rate that they've been observed in the sample so far
* assumption that vote proportions in the ballot-polling stratum will reflect the reported proportions
Given these additional numbers, return to the sampling tool and draw additional ballots, find them with the ballot manifest tool, update the observed sample values, and rerun the SUITE risk calculations. Additional code cells to do this are included below.
###Code
sample_sizes_new = {}
# Add a reminder note about the candidate dict structure.
for k in contests_not_yet_confirmed:
sample_sizes_new[k] = estimate_escalation_n(\
N_w1 = candidates[k[0]][0],\
N_w2 = candidates[k[0]][1],\
N_l1 = candidates[k[1]][0],\
N_l2 = candidates[k[1]][1],\
N1 = stratum_sizes[0],\
N2 = stratum_sizes[1],\
n1 = n1,\
n2 = n2,\
o1_obs = o1,\
o2_obs = o2,\
u1_obs = u1,\
u2_obs = u2,\
n2l_obs = observed_poll[k[1]],\
n2w_obs = observed_poll[k[0]],\
n_ratio = n_ratio,\
risk_limit = risk_limit,\
gamma = gamma,\
stepsize = lambda_step,
risk_limit_tol = 0.8)
sample_size_new = np.amax([v[0]+v[1] for v in sample_sizes_new.values()])
n1_new = np.amax([v[0] for v in sample_sizes_new.values()])
n2_new = np.amax([v[1] for v in sample_sizes_new.values()])
print("estimated sample sizes for each contest, written as (cvr stratum, no-cvr stratum):\n")
pprint.pprint(sample_sizes_new)
print('\n\nexpected total sample size needed to confirm remaining pairs:', sample_size_new)
print("\nDraw this many additional ballots in the CVR stratum:", n1_new - n1)
print("Draw this many additional ballots in the no-CVR stratum:", n2_new - n2)
###Output
_____no_output_____
###Markdown
Draw additional ballots
###Code
# print the current state of the PRNG after drawing the initial samples
print(prng)
# CVR stratum sample size, sampled with replacement
sample1 = prng.randint(1, stratum_sizes[0]+1, size=n1_new - n1)
# No-CVR ballots are sampled without replacement
remaining_ballots = [i for i in range(stratum_sizes[1]) if i not in sample2]
sample2 = random_sample(remaining_ballots, size=n2_new - n2, replace=False, prng=prng)
###Output
_____no_output_____
###Markdown
CVR stratum sample
###Code
print("CVR stratum sample:\n", sample1)
m = np.zeros_like(sample1, dtype=bool)
m[np.unique(sample1, return_index=True)[1]] = True
print("CVR stratum repeated ballots:\n", sample1[~m])
###Output
_____no_output_____
###Markdown
No-CVR sample
###Code
print("No-CVR stratum sample:\n", sample2)
###Output
_____no_output_____
###Markdown
Find ballots using ballot manifest
###Code
nocvr_sample = sample_from_manifest(filename="../data/Rochester Hills Ballot Manifest - combined.csv", \
sample=sample2, \
stratum_size=stratum_sizes[1])
write_ballots_to_sample("../log/Rochester-Hills-sampled-ballots.csv", nocvr_sample)
print("No CVR sample")
display(HTML(
'<table><tr>{}</tr></table>'.format(
'</tr><tr>'.join(
'<td>{}</td>'.format('</td><td>'.join(str(_) for _ in row)) for row in nocvr_sample)
)
))
###Output
_____no_output_____
###Markdown
Enter the data from the *combined* sample
Sample statistics for the CVR stratum (stratum 1).
Update the numbers below to include what was seen in the initial sample PLUS what was seen in the new sample.
###Code
print("The initial sample size in the CVR stratum was", n1, \
"and the new sample size was", n1_new)
print("The observed overstatements and understatements from the original sample were")
pprint.pprint({"o1" : o1, "o2" : o2, "u1" : u1, "u2" : u2})
# Number of observed...
def cvr_audit_inputs(o1, o2, u1, u2):
return (o1, o2, u1, u2)
cvr_stats = interactive(cvr_audit_inputs,
o1 = widgets.IntSlider(min=0,max=n1_new,value=0),
u1 = widgets.IntSlider(min=0,max=n1_new,value=0),
o2 = widgets.IntSlider(min=0,max=n1_new,value=0),
u2 = widgets.IntSlider(min=0,max=n1_new,value=0))
display(cvr_stats)
(o1, o2, u1, u2) = [cvr_stats.children[i].value for i in range(4)]
###Output
_____no_output_____
###Markdown
Sample statistics for the no-CVR stratum (stratum 2)
Update the numbers below to include what was seen in the initial sample PLUS what was seen in the new sample.
###Code
print("The initial sample size in the no-CVR stratum was", n2, \
"and the new sample size was", n2_new)
print("The observed tallies from the original sample were")
pprint.pprint(observed_poll)
nocvr_widgets=[]
# create the widgets
for name in candidates.keys():
nocvr_widgets.append(widgets.IntSlider(value=0,min=0,max=n2_new,description=name))
# group the widgets into a FlexBox
nocvr_audit_inputs = widgets.VBox(children=nocvr_widgets)
# display the widgets
display(nocvr_audit_inputs)
# no-CVR sample is stored in a dict with name, votes in the sample
observed_poll = {}
for widget in nocvr_widgets:
observed_poll[widget.description] = widget.value
assert np.sum(list(observed_poll.values())) <= n2_new, "Too many ballots input"
pprint.pprint(observed_poll)
###Output
_____no_output_____
###Markdown
What's the risk for this sample?
The audit considers every (winner, loser) pair in each contest: auditing continues until there is strong evidence that every winner in the contest got more votes than every loser in the contest. The SUITE risk for every pair will appear beneath the cell below after it is run. The audit continues until none of the numbers is larger than the risk limit. E.g., if the risk limit is 10%, the audit stops when the numbers in the table are all less than 0.1.
###Code
# Find audit p-values across (winner, loser) pairs
audit_pvalues = audit_contest(candidates, winners_not_yet_confirmed, \
losers_not_yet_confirmed, stratum_sizes, \
n1_new, n2_new, o1, o2, u1, u2, observed_poll, \
risk_limit=risk_limit, gamma=gamma, stepsize=lambda_step)
pprint.pprint(audit_pvalues)
# Track contests not yet confirmed
contests_not_yet_confirmed = [i[0] for i in audit_pvalues.items() \
if i[1]>risk_limit]
print("Pairs not yet confirmed:\n", contests_not_yet_confirmed)
# Save everything to file, you may change the file name in quotes
write_audit_results("../log/Rochester hills audit_results2.json", \
n1_new, n2_new, sample1, sample2, \
o1, o2, u1, u2, observed_poll, \
audit_pvalues, prng.getstate())
###Output
_____no_output_____ |
tutorials/08_ddn_pytorch_node.ipynb | ###Markdown
Implementing a Declarative Node using the `ddn.pytorch.node` Module
Unlike the previous tutorials, in this notebook we use the [PyTorch](https://pytorch.org/) framework to implement a declarative node. For information on how to use PyTorch, see the [official documentation](https://pytorch.org/docs/stable/index.html) and [tutorials](https://pytorch.org/tutorials/). Here we will show how to implement a declarative node using the `ddn.pytorch.node` module to explore the behavior of the node and solve simple bi-level optimization problems.
Example 1: Minimize the KL-divergence over the probability simplex
We consider the problem of minimizing the KL-divergence between the input $x$ and output $y$ subject to the output forming a valid probability vector (i.e., the elements of $y$ must be positive and sum to one). We will assume strictly positive $x$. The problem can be written formally as
$$\begin{array}{rll}y =& \text{argmin}_u & - \sum_{i=1}^{n} x_i \log u_i \\& \text{subject to} & \sum_{i=1}^{n} u_i = 1\end{array}$$
where the positivity constraint on $y$ is automatically satisfied by the domain of the log function.
A nice feature of this problem is that we can solve it in closed-form as
$$y = \frac{1}{\sum_{i=1}^{n} x_i} x.$$
However, we will only use this for verification and pretend for now that we do not have a closed-form solution. Instead we will make use of the `scipy.optimize` module to solve the problem via an iterative method. Deriving our deep declarative node from the `LinEqConstDeclarativeNode` class, we will need to implement three functions: the `objective` function, the `solve` function, and the `linear_constraint_parameters` function (the `gradient` function is already implemented for us).
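As an aside that will be useful for checking the plots later in this notebook, differentiating the closed-form solution above gives the Jacobian
$$Dy = \frac{1}{\sum_{i=1}^{n} x_i}\left(I - y \mathbf{1}^\mathsf{T}\right),$$
so $\partial y_1/\partial x_1 = (1 - y_1)/\sum_i x_i$ and $\partial y_j/\partial x_1 = -y_j/\sum_i x_i$ for $j > 1$; this is the expression quoted in the comments of the gradient-plotting cell below.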
###Code
import torch
import numpy as np
import scipy.optimize as opt
import sys
sys.path.append("../")
from ddn.pytorch.node import *
import warnings
warnings.filterwarnings('ignore')
# create the example node
class MinKL(LinEqConstDeclarativeNode):
def __init__(self):
super().__init__()
def objective(self, x, y):
"""f(x, y) = -sum x*log(y)"""
return -1.0 * torch.einsum('bn,bn->b', (x, y.log()))
def linear_constraint_parameters(self, y):
"""Ay=d ==> sum(y) = 1"""
A = y.new_ones(1, y.size(-1)) # 1xm
d = y.new_ones(1) # 1
return A, d
def solve(self, x):
"""Solve the constrained optimization problem using scipy's built-in minimize function.
Here we initialize the solver at the uniform distribution.
"""
m = n = x.size(-1)
u0 = np.ones((m,)) / m
y = torch.zeros_like(x)
# Loop over batch:
for i, xi in enumerate(x):
result = opt.minimize(lambda u: -1.0 * np.dot(xi.detach().numpy(), np.log(u)),
u0,
constraints={'type': 'eq', 'fun': lambda u: np.sum(u) - 1.0},
bounds=opt.Bounds(1e-12, np.inf, keep_feasible=True),
options={'maxiter': 100000, 'ftol': 1e-12})
y[i, :] = torch.tensor(result.x)
# The solve function must always return two arguments, the solution and context (i.e., cached values needed
# for computing the gradient). In the case of linearly constrained problems we do not need the dual solution
# in computing the gradient so we return None for context.
return y, None
###Output
_____no_output_____
###Markdown
And now we test the node.
###Code
node = MinKL()
x = torch.rand(1, 5)
print("Input:\n{}".format(x.squeeze().numpy()))
print("Expected output:\n{}".format((x / x.sum(dim=-1, keepdim=True)).squeeze().numpy()))
y, _ = node.solve(x)
print("Actual output:\n{}".format(y.squeeze().numpy()))
###Output
_____no_output_____
###Markdown
We now plot the function and gradient sweeping the first component of the input $x_1$ from 0.1 to 10.0 while holding the other elements of $x$ constant.
###Code
%matplotlib notebook
import matplotlib.pyplot as plt
x1_data = torch.linspace(0.1, 10.0, 100)
x = x.detach() # Don't track computation graph
y_data = []
Dy_data = []
vjp_data = []
for x1 in x1_data:
x_new = x.clone()
x_new[0, 0] = x1
x_new.requires_grad = True
y, _ = torch.no_grad()(node.solve)(x_new) # Run node's forward pass
y.requires_grad = True
y_data.append(y.squeeze().detach().numpy())
# Note that the jacobian function call is inefficient
# and is used only for teaching and analysis purposes
Dy_data.append(node.jacobian(x_new, y=y)[0][0,:,0].detach().numpy())
vjp_data.append(node.gradient(x_new, y=y)[0][0,:].detach().numpy())
# Plot output y as x varies
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(x1_data, y_data)
plt.ylabel(r"$y$")
# Plot derivative dy/dx1 as x1 varies
# dy/dx = (I - y 1^T) / sum(xi)
# dy1/dx1 = (1 - y1) / sum(xi)
# dyi/dx1 = -yi / sum(xi), i > 1
plt.subplot(3, 1, 2)
plt.plot(x1_data, Dy_data)
#plt.ylabel(r"$Dy_{:,1}$")
plt.ylabel(r"$\frac{dy}{dx_1}$")
# Plot vector-Jacobian product as x1 varies
plt.subplot(3, 1, 3)
plt.plot(x1_data, vjp_data)
plt.xlabel(r"$x_1$");
plt.ylabel(r"$\mathbf{1}^\mathsf{T}Dy$")
fig.subplots_adjust(hspace=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Bi-level optimization
Now let's see whether we can use the node within a bi-level optimization problem. We will attempt to learn an input $x$ that results in an output $y$ with smallest norm-squared. Moreover, we will regularize the norm of $x$ to be close to 10. Given our understanding of KL-divergence this should learn a vector $x$ that is a constant multiple of the ones vector (i.e., all elements of $x$ should be the same), since the norm-squared of a probability vector is smallest for the uniform distribution, and the node outputs the uniform distribution exactly when all elements of $x$ are equal. Let's see what happens.
###Code
# define the upper-level objective
def J(x, y=None):
"""Computes our upper-level objective given both x and y."""
if y is None:
y, _ = torch.no_grad()(node.solve)(x)
return ((y.norm(dim=-1)) ** 2 + (x.norm(dim=-1) - 10.0) ** 2).mean()
kl_problem = MinKL()
kl_declarative_layer = DeclarativeLayer(kl_problem)
# Solve using gradient descent:
learning_rate = 0.5
x = torch.rand(1, 5, requires_grad=True)
history = [J(x)]
for i in range(500):
y = kl_declarative_layer(x)
z = J(x, y)
z.backward()
x_new = x - learning_rate * x.grad
x = x_new.detach().requires_grad_(True)
history.append(J(x))
y, _ = torch.no_grad()(node.solve)(x)
x_np = x.detach().squeeze().numpy()
y_np = y.detach().squeeze().numpy()
print("Found x = {} with norm {:0.2f}".format(x_np, np.sqrt(np.dot(x_np, x_np))))
print("Results in y = {}".format(y_np))
fig = plt.figure()
plt.semilogy(history)
plt.ylabel("upper-level objective (log-scale)"); plt.xlabel("iteration")
plt.show()
# Solve using LBFGS:
x = torch.rand(1, 5, requires_grad=True)
history = []
optimizer = torch.optim.LBFGS([x], lr=1, max_iter=100)
def reevaluate():
optimizer.zero_grad()
y = kl_declarative_layer(x)
z = J(x, y)
z.backward()
history.append(z.clone())
return z
optimizer.step(reevaluate)
y, _ = torch.no_grad()(node.solve)(x)
x_np = x.detach().squeeze().numpy()
y_np = y.detach().squeeze().numpy()
print("Found x = {} with norm {:0.2f}".format(x_np, np.sqrt(np.dot(x_np, x_np))))
print("Results in y = {}".format(y_np))
fig = plt.figure()
plt.semilogy(history)
plt.ylabel("upper-level objective (log-scale)"); plt.xlabel("iteration")
plt.show()
###Output
_____no_output_____
###Markdown
Example 2: Minimize a robust (pseudo-Huber) distance
We consider the problem of minimizing the distance between the input $x$ and output $y$ using the robust pseudo-Huber penalty function. The problem can be written formally as
$$\begin{equation}y = \text{argmin}_u \sum_{i=1}^{n} \phi^\text{pseudo}(u - x_i; \alpha)\end{equation}$$
where the pseudo-Huber penalty function is given by
$$\begin{equation} \phi^{\text{pseudo}}(z; \alpha) = \alpha^2 \left( \sqrt{1 + \left(\frac{z}{\alpha}\right)^2} - 1 \right).\end{equation}$$
Deriving our deep declarative node from the `AbstractDeclarativeNode` class, we will need to implement two functions: the `objective` function, and the `solve` function. However, we will also provide a `gradient` function to compare the generic gradient result with an efficient hand-coded gradient that makes use of the structure of the problem.
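For reference, the analytic gradient implemented in the `gradient` method below can be obtained by implicitly differentiating the first-order optimality condition $\sum_{i=1}^{n} \phi'(y - x_i; \alpha) = 0$; a brief sketch of the working, in the notation above:
$$\frac{\partial y}{\partial x_j} = \frac{\phi''(y - x_j; \alpha)}{\sum_{i=1}^{n} \phi''(y - x_i; \alpha)}, \qquad \phi''(z; \alpha) = \left(1 + \frac{z^2}{\alpha^2}\right)^{-3/2},$$
which is exactly the normalized weight $w / \sum w$ computed in the code.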
###Code
import torch
import numpy as np
import sys
sys.path.append("../")
from ddn.pytorch.node import *
import warnings
warnings.filterwarnings('ignore')
class GlobalPseudoHuberPool2d(AbstractDeclarativeNode):
""""""
def __init__(self):
super().__init__()
def objective(self, x, alpha, y):
alpha2 = alpha * alpha
z = y.unsqueeze(-1).unsqueeze(-1) - x
phi = alpha2 * (torch.sqrt(1.0 + torch.pow(z, 2) / alpha2) - 1.0)
return phi.sum(dim=(-2,-1)) # b
def solve(self, x, alpha):
x = x.detach()
y = x.mean([-2, -1]).clone().requires_grad_()
y = self._runOptimisation(x, alpha, y)
y = y.detach()
z = (y.unsqueeze(-1).unsqueeze(-1) - x).clone()
ctx = {'z': z}
return y, ctx
def _runOptimisation(self, x, alpha, y):
with torch.enable_grad():
opt = torch.optim.LBFGS([y],
lr=1, # Default: 1
max_iter=100, # Default: 20
max_eval=None, # Default: None
tolerance_grad=1e-05, # Default: 1e-05
tolerance_change=1e-09, # Default: 1e-09
history_size=100, # Default: 100
line_search_fn=None # Default: None, Alternative: "strong_wolfe"
)
def reevaluate():
opt.zero_grad()
f = self.objective(x, alpha, y).sum() # sum over batch elements
f.backward()
return f
opt.step(reevaluate)
return y
def gradient(self, x, alpha, y=None, v=None, ctx=None):
"""Override base class to compute the analytic gradient of the optimal solution."""
if y is None:
y, ctx = torch.no_grad()(self.solve)(x, alpha)
if v is None:
v = torch.ones_like(y)
z = ctx['z'] # b x n1 x n2
w = torch.pow(1.0 + torch.pow(z, 2) / (alpha * alpha), -1.5)
w_sum = w.sum(dim=-1, keepdim=True).sum(dim=-2, keepdim=True).expand_as(w)
Dy_at_x = torch.where(w_sum.abs() <= 1e-9, torch.zeros_like(w), w.div(w_sum)) # b x n1 x n2
return torch.einsum('b,bmn->bmn', (v, Dy_at_x)), None
###Output
_____no_output_____
###Markdown
And now we test the node.
###Code
node = GlobalPseudoHuberPool2d()
batch_size = 3
input_size = (6, 6)
x = torch.randn(batch_size, *input_size, dtype=torch.double, requires_grad=True)
alpha = torch.tensor([0.5], dtype=torch.double, requires_grad=False)
y, _ = torch.no_grad()(node.solve)(x, alpha)
print("Input:\n{}".format(x[0,...].squeeze().detach().numpy())) # First batch element only
print("Output:\n{}".format(y[0,...].squeeze().detach().numpy())) # First batch element only
###Output
_____no_output_____
###Markdown
We now plot the function and gradient sweeping the first component of the input $x_1$ from -10.0 to 10.0 while holding the other elements of $x$ constant.
###Code
%matplotlib notebook
import matplotlib.pyplot as plt
x1_data = torch.linspace(-10.0, 10.0, 110)
x = x.detach() # Don't track computation graph
y_data = []
vjp_data = []
vjp2_data = []
for x1 in x1_data:
x_new = x.clone()
x_new[:, 0, 0] = x1
x_new.requires_grad = True
y, ctx = torch.no_grad()(node.solve)(x_new, alpha)
y.requires_grad = True
y_data.append(y[0,...].squeeze().detach().numpy()) # First batch element only
vjp_data.append(super(type(node), node).gradient(x_new, alpha, y=y, ctx=ctx)[0][0,0,:].detach().numpy()) # First 6 components
vjp2_data.append(node.gradient(x_new, alpha, y=y, ctx=ctx)[0][0,0,:].detach().numpy()) # First 6 components
fig = plt.figure()
plt.subplot(3, 1, 1)
plt.plot(x1_data, y_data)
plt.ylabel(r"$y$")
plt.subplot(3, 1, 2)
plt.plot(x1_data, vjp_data)
plt.xlabel(r"$x_1$"); plt.ylabel(r"$\mathbf{1}^\mathsf{T}Dy$ (generic)")
plt.subplot(3, 1, 3)
plt.plot(x1_data, vjp2_data)
plt.xlabel(r"$x_1$"); plt.ylabel(r"$\mathbf{1}^\mathsf{T}Dy$ (analytic)")
fig.subplots_adjust(hspace=0.5)
plt.show()
###Output
_____no_output_____
###Markdown
Example 3: Minimize a PnP objective function
We consider the problem of minimizing the weighted reprojection error between a set of corresponding 3D and 2D points $\{p_i, q_i \}_{i=1}^n$ by varying the rigid transformation parameters $y$ applied to the 3D points. Here the transformation parameters consist of an angle-axis rotation vector concatenated with a translation vector. The problem can be written formally as
$$y = \text{argmin}_u \sum_{i=1}^{n} w_i \| \pi(p_i, u) - q_i \|_2^2$$
where the projection $\pi(\cdot)$ is given by
$$\pi(p, u) = h(K (R(u) p + t(u)))$$
with intrinsic camera parameters $K$, rotation $R$, translation $t$, and map from homogeneous-to-Cartesian coordinates $h$, where $h(x) = [x_1 / x_3, x_2 / x_3]$.
Deriving our deep declarative node from the `AbstractDeclarativeNode` class, we will need to implement two functions: the `objective` function, and the `solve` function. For this class, we use the `solvePnPRansac` function from the Python OpenCV library.
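The code below relies on `geo.project_points_by_theta` from the ddn library to evaluate $\pi$. As a rough, self-contained sketch of what such a projection could look like (the library routine may differ in its batching, intrinsics layout, and handling of very small rotation angles — this is only to illustrate the formula above), one could write:
```python
# Illustrative sketch only; assumes theta = (angle-axis | translation) per batch element
# and K given as [fx, fy, cx, cy] (or None for identity intrinsics).
import torch

def project_points(p, theta, K=None):
    """p: b x n x 3 object points, theta: b x 6 pose. Returns b x n x 2 image points."""
    r, t = theta[..., :3], theta[..., 3:]
    angle = r.norm(dim=-1, keepdim=True).clamp(min=1e-12)   # rotation angle (clamped to avoid 0/0)
    k = (r / angle).unsqueeze(-2)                            # unit rotation axis, b x 1 x 3
    c = angle.cos().unsqueeze(-1)                            # b x 1 x 1
    s = angle.sin().unsqueeze(-1)                            # b x 1 x 1
    # Rodrigues' rotation formula: R(u) p
    p_rot = p * c + torch.cross(k.expand_as(p), p, dim=-1) * s \
            + k * (k * p).sum(dim=-1, keepdim=True) * (1.0 - c)
    p_cam = p_rot + t.unsqueeze(-2)                          # + t(u)
    xy = p_cam[..., :2] / p_cam[..., 2:3]                    # h(.): divide by depth
    if K is not None:                                        # apply focal lengths and principal point
        xy = xy * K[..., :2].unsqueeze(-2) + K[..., 2:].unsqueeze(-2)
    return xy
```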
###Code
import torch
import numpy as np
import sys
import cv2 as cv
from math import degrees
sys.path.append("../")
from ddn.pytorch.node import *
import ddn.pytorch.geometry_utilities as geo
import warnings
warnings.filterwarnings('ignore')
class PnP(AbstractDeclarativeNode):
"""Declarative PnP layer"""
def __init__(self,
ransac_threshold=0.1,
ransac_max_iterations=1000,
):
super().__init__()
self.ransac_threshold = ransac_threshold
self.ransac_max_iterations = ransac_max_iterations
def objective(self, p, q, w, K, y):
"""Weighted reprojection error"""
p_projected = geo.project_points_by_theta(p, y, K)
squared_error = torch.sum((p_projected - q) ** 2, dim=-1)
w = torch.nn.functional.relu(w) # Enforce non-negative weights
return torch.einsum('bn,bn->b', (w, squared_error))
def solve(self, p, q, w, K=None):
p = p.detach()
q = q.detach()
w = w.detach()
K = K.detach() if K is not None else None
y = self._initialise_transformation(p, q, w, K).requires_grad_()
y = self._run_optimisation(p, q, w, K, y=y)
return y.detach(), None
def _ransac_p3p(self, p, q, K, threshold, max_iterations):
p_np = p.cpu().numpy()
q_np = q.cpu().numpy()
y = q.new_zeros(q.size(0), 6)
if K is None:
K_np = np.float32(np.array([[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]]))
for i in range(q_np.shape[0]): # loop over batch
if K is not None:
K_np = np.float32(np.array([[K[i, 0], 0.0, K[i, 2]],
[0.0, K[i, 1], K[i, 3]],
[0.0, 0.0, 1.0]]))
retval, rvec, tvec, inliers = cv.solvePnPRansac(
p_np[i, :, :], q_np[i, :, :], K_np, None,
iterationsCount=max_iterations,
reprojectionError=threshold,
flags=cv.SOLVEPNP_EPNP)
if rvec is not None and tvec is not None and retval:
rvec = torch.as_tensor(rvec, dtype=q.dtype, device=q.device).squeeze(-1)
tvec = torch.as_tensor(tvec, dtype=q.dtype, device=q.device).squeeze(-1)
if torch.isfinite(rvec).all() and torch.isfinite(tvec).all():
y[i, :3] = rvec
y[i, 3:] = tvec
return y
def _initialise_transformation(self, p, q, w, K):
return self._ransac_p3p(p, q, K, self.ransac_threshold, self.ransac_max_iterations)
def _run_optimisation(self, *xs, y):
with torch.enable_grad():
opt = torch.optim.LBFGS([y],
lr=1.0,
max_iter=1000,
max_eval=None,
tolerance_grad=1e-40,
tolerance_change=1e-40,
history_size=100,
line_search_fn="strong_wolfe"
)
def reevaluate():
opt.zero_grad()
f = self.objective(*xs, y=y).sum() # sum over batch elements
f.backward()
return f
opt.step(reevaluate)
return y
###Output
_____no_output_____
###Markdown
Now we test the node with 15 random 2D-3D point pairs, random camera parameters, significant additive Gaussian noise, and a single outlier correspondence. We should expect poor results for PnP algorithms when there are outliers, but perhaps we can learn to identify such outliers?
###Code
node = PnP()
b = 1
n = 15
# Generate camera parameters:
y_true = torch.randn(b, 6, dtype=torch.double)
R_true = geo.angle_axis_to_rotation_matrix(y_true[..., :3])
t_true = y_true[..., 3:]
# Generate image points, then assign depths:
xy = 2.0 * torch.rand(b, n, 2, dtype=torch.double) - 1.0 # [-1, 1]
z = 2.0 * torch.rand(b, n, 1, dtype=torch.double) + 1.0 # [1, 3]
p_transformed = torch.cat((z * xy, z), dim=-1)
p = torch.einsum('brs,bms->bmr', (R_true.transpose(-2,-1), p_transformed - t_true.unsqueeze(-2))) # Inverse transform
q = xy.clone()
q = q + 0.1 * torch.randn(b, n, 2, dtype=torch.double) # add noise
q[:, 0:1, :] = torch.randn(b, 1, 2, dtype=torch.double) # add outliers
# Generate weights (uniform):
w = torch.ones(b, n, dtype=torch.double) # bxn
w = w.div(w.sum(-1).unsqueeze(-1))
# Run solver:
y, _ = torch.no_grad()(node.solve)(p, q, w)
R = geo.angle_axis_to_rotation_matrix(y[..., :3])
t = y[..., 3:]
# Compute objective function value:
reproj_error = torch.no_grad()(node.objective)(p, q, w, K=None, y=y)
reproj_error_true = torch.no_grad()(node.objective)(p, q, w, K=None, y=y_true)
# Compute transformation errors:
error_rotation = (0.5 * ((R * R_true).sum(dim=(-2, -1)) - 1.0)).acos()
error_translation = (t - t_true).norm(dim=-1)
# Save original data:
p_orig = p.clone()
q_orig = q.clone()
w_orig = w.clone()
y_orig = y.clone()
print("True Output:\n{}".format(y_true[0,...].squeeze().detach().numpy())) # First batch element only
print("Est. Output:\n{}".format(y[0,...].squeeze().detach().numpy())) # First batch element only
print("True Reprojection Error: {:0.4f}".format(reproj_error_true[0,...].squeeze().detach().numpy())) # First batch element only
print("Est. Reprojection Error: {:0.4f}".format(reproj_error[0,...].squeeze().detach().numpy())) # First batch element only
print("Rotation Error: {:0.2f} degrees".format(degrees(error_rotation[0,...].squeeze().detach().numpy()))) # First batch element only
print("Translation Error: {:0.2f}".format(error_translation[0,...].squeeze().detach().numpy())) # First batch element only
###Output
_____no_output_____
###Markdown
It is clear that even a single outlier can play havoc with PnP estimation. We can visualize this by plotting the 2D points and projected 3D points, using the true and estimated transformation parameters. We link the putative 2D and 3D correspondences with a line, to make the outlier correspondence clear.
###Code
%matplotlib notebook
import matplotlib.pyplot as plt
q_np = q.numpy()
p_proj_true_np = geo.project_points_by_theta(p, y_true).numpy()
p_proj_np = geo.project_points_by_theta(p, y).numpy()
for i in range(q_np[0, :, 0].shape[0]):
plt.plot([q_np[0, :, 0], p_proj_true_np[0, :, 0]], [q_np[0, :, 1], p_proj_true_np[0, :, 1]], color='gray', linewidth=0.5)
plt.scatter(q_np[0, :, 0], q_np[0, :, 1], s=16, c='k', alpha=1.0, marker='s', label='2D points')
plt.scatter(p_proj_true_np[0, :, 0], p_proj_true_np[0, :, 1], s=16, c='r', alpha=1.0, marker='o', label='3D points (true projection)')
plt.scatter(p_proj_np[0, :, 0], p_proj_np[0, :, 1], s=16, facecolors='none', edgecolors='k', alpha=1.0, marker='o', label='3D points (est. projection)')
plt.legend(fontsize='small')
plt.show()
###Output
_____no_output_____
###Markdown
Bi-level optimization
Now let's try to learn weights $w$ that attenuate the effect of the outlier correspondences, including those that occur due to noise. Our upper-level objective function will be a weighted sum of rotation and translation errors, given that we know the true camera pose. We expect the outlier correspondence to be downweighted, as well as some of the noisier points.
###Code
# Define the upper-level objective:
def J(p, q, w, y=None):
"""Compute sum of angular and positional camera errors"""
if y is None:
y, _ = torch.no_grad()(node.solve)(p, q, w)
R = geo.angle_axis_to_rotation_matrix(y[..., :3])
t = y[..., 3:]
max_dot_product = 1.0 - 1e-7
error_rotation = (0.5 * ((R * R_true).sum(dim=(-2, -1)) - 1.0)
).clamp_(-max_dot_product, max_dot_product).acos()
error_translation = (t - t_true).norm(dim=-1)
#print("rot: {:0.2f}, trans: {:0.6f}".format(degrees(error_rotation[0,...]), error_translation[0,...]))
return (error_rotation + 0.25 * error_translation).mean(), error_rotation, error_translation
# Reset parameters:
w = w_orig.clone().detach().requires_grad_()
y = y_orig.clone()
# Form a declarative layer:
pnp_declarative_layer = DeclarativeLayer(node)
loss, error_rotation, error_translation = J(p, q, w, y)
history_loss = [loss]
history_rot = [degrees(error_rotation[0, ...])] # First batch element only
history_tran = [error_translation[0, ...]] # First batch element only
# Solve using LBFGS optimizer:
optimizer = torch.optim.LBFGS([w], lr=1, max_iter=50, line_search_fn="strong_wolfe")
def reevaluate():
optimizer.zero_grad()
y = pnp_declarative_layer(p, q, w, None)
z, error_rotation, error_translation = J(p, q, w, y)
z.backward()
history_loss.append(z.clone())
history_rot.append(degrees(error_rotation[0, ...])) # First batch element only
history_tran.append(error_translation[0, ...]) # First batch element only
return z
optimizer.step(reevaluate)
w = torch.nn.functional.relu(w) # Enforce non-negativity
y, _ = torch.no_grad()(node.solve)(p, q, w)
R = geo.angle_axis_to_rotation_matrix(y[..., :3])
t = y[..., 3:]
reproj_error = torch.no_grad()(node.objective)(p, q, w, K=None, y=y)
error_rotation = (0.5 * ((R * R_true).sum(dim=(-2, -1)) - 1.0)).acos()
error_translation = (t - t_true).norm(dim=-1)
p_np = p.detach().numpy()
q_np = q.detach().numpy()
w_np = w.detach().numpy()
y_np = y.detach().numpy()
print("Found w = {}".format(w_np[0, ...]))
print("Reprojection Error: {:0.4f}".format(reproj_error[0,...].squeeze().detach().numpy()))
print("Rotation Error: {:0.2f} degrees".format(degrees(error_rotation[0,...].squeeze().detach().numpy())))
print("Translation Error: {:0.6f}".format(error_translation[0,...].squeeze().detach().numpy()))
print("True Output: {}".format(y_true[0,...].squeeze().detach().numpy())) # First batch element only
print("Est. Output: {}".format(y[0,...].squeeze().detach().numpy())) # First batch element only
###Output
_____no_output_____
###Markdown
And now we plot the learning curves.
###Code
%matplotlib notebook
import matplotlib.pyplot as plt
fig = plt.figure()
plt.plot(history_loss)
plt.ylabel("upper-level objective"); plt.xlabel("iteration")
plt.show()
fig = plt.figure()
plt.plot(history_rot)
plt.ylabel("rotation error (degrees)"); plt.xlabel("iteration")
plt.show()
fig = plt.figure()
plt.plot(history_tran)
plt.ylabel("translation error"); plt.xlabel("iteration")
plt.show()
###Output
_____no_output_____
###Markdown
We can visualize the results by plotting the 2D points and projected 3D points. We scale the points by the estimated weight, and replace points with weight $\approx 0$ with crosses to indicate outlier correspondences.
###Code
%matplotlib notebook
import matplotlib.pyplot as plt
p_proj_true_np = geo.project_points_by_theta(p.detach(), y_true).numpy()
p_proj_np = geo.project_points_by_theta(p.detach(), y).numpy()
for i in range(q_np[0, :, 0].shape[0]):
# plt.plot([q_np[0, :, 0], p_proj_true_np[0, :, 0]], [q_np[0, :, 1], p_proj_true_np[0, :, 1]], color='gray', linewidth=0.5)
plt.plot([q_np[0, :, 0], p_proj_np[0, :, 0]], [q_np[0, :, 1], p_proj_np[0, :, 1]], color='gray', linewidth=0.5)
plt.scatter(q_np[0, :, 0], q_np[0, :, 1], s=200.*w_np[0,...], c='k', alpha=1.0, marker='s', label='2D points')
plt.scatter(p_proj_true_np[0, :, 0], p_proj_true_np[0, :, 1], s=200.*w_np[0,...], c='r', alpha=1.0, marker='o', label='3D points (true projection)')
plt.scatter(p_proj_np[0, :, 0], p_proj_np[0, :, 1], s=200.*w_np[0,...], facecolors='none', edgecolors='k', alpha=1.0, marker='o', label='3D points (est. projection)')
# Plot identified outliers separately:
plt.scatter(q_np[0, w_np[0,...] < 1e-3, 0], q_np[0, w_np[0,...] < 1e-3, 1], s=16, c='k', alpha=1.0, marker='x', label='2D points (outliers)')
plt.scatter(p_proj_true_np[0, w_np[0,...] < 1e-3, 0], p_proj_true_np[0, w_np[0,...] < 1e-3, 1], s=16, c='k', alpha=1.0, marker='x', label='3D points (outliers)')
plt.scatter(p_proj_np[0, w_np[0,...] < 1e-3, 0], p_proj_np[0, w_np[0,...] < 1e-3, 1], s=16, c='k', alpha=1.0, marker='x', label='3D points (outliers)')
plt.legend(fontsize='small')
plt.show()
###Output
_____no_output_____ |
Spacy/4_Spacy_Named_Entity_Recognition_Label.ipynb | ###Markdown
Named Entity Recognition or Detection
- Classifying text into predefined categories or real-world object entities
- Takes a string and identifies people, places, and organizations
Uses
- **Classifying or categorizing the content by getting the relevant tag**
- **Improve search algorithms**
- **For content recommendation**
- **For info extraction**
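As a rough illustration of the first use case (tagging or categorizing content), one could simply count the entity labels spaCy finds in a document and treat the most frequent labels as content tags — a minimal sketch, where the example sentence and the choice of tags are purely illustrative:
```python
from collections import Counter
import spacy

nlp = spacy.load('en_core_web_sm')
doc = nlp("Apple is looking at buying a U.K. startup based in London for $1 billion.")
tag_counts = Counter(ent.label_ for ent in doc.ents)  # entity types found, e.g. ORG, GPE, MONEY
print(tag_counts.most_common())                       # most frequent entity types -> content tags
```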
###Code
import spacy
nlp = spacy.load('en_core_web_sm')
doc_covid = nlp(open('covid19.txt').read())
doc_covid
for token in doc_covid.ents:
print('{:<50},{:<20}'.format(token.text, token.label_))
spacy.explain('ORG')
spacy.explain('GPE')
from spacy import displacy
displacy.render(doc_covid, style = 'ent')
exp2 = nlp(u'''Google News is a news aggregator app developed by Google.
It presents a continuous, customizable flow of articles organized
from thousands of publishers and magazines. Google News is
available as an app on Android, iOS, and the Web. Google released
a beta version in September 2002 and the official app in January 2006.''')
for token in exp2.ents:
print('{:<15}: {:<20}'.format(token.text, token.label_))
displacy.render(exp2, style = 'ent')
spacy.explain('NORP')
###Output
_____no_output_____ |
Classification_3_positions.ipynb | ###Markdown
Labels
###Code
def np_labeliser(data,col):
labels = data[:,col]
return labels
labels = np_labeliser(dataset,22)
labels[:10]
###Output
_____no_output_____
###Markdown
Feature Selection
###Code
def np_featuriser(dataset, feature_list):
features = np.delete(dataset,feature_list,1)
#test = np.delete(test,feature_list,1)
#val = np.delete(val,feature_list,1)
return features
feature_list = [22]
#np.set_printoptions(precision=4)
print len(dataset[0])
#train_features_nb, test_features_nb, val_features_nb = np_featuriser(train_set_nb, test_set_nb, val_set_nb, feature_list)
features = np_featuriser(dataset, feature_list)
print len(features[0])
def vt_fsel(feature_set):
sel = VarianceThreshold(threshold=(.8 * (1 - .8)))
sel.fit_transform(feature_set)
vt_list = sel.get_support()
l_vt = []
j = 0
for i in vt_list:
if i == False:
l_vt.append(j)
print "%s. feature name: %s" %(j, columns.keys()[columns.values().index(j)])
j = j+1
return l_vt
list_vt = vt_fsel(features)
features_vt = np_featuriser(features, list_vt)
features_vt.shape
def sup_features(usp_list,x):
remove = []
j = 0
for i in usp_list:
if i == False:
remove.append(j)
if x=="vt":
print "%s. feature name: %s" %(j, columns.keys()[columns.values().index(j)])
elif x == "uni":
print "%s. feature name: %s" %(j, columns.keys()[columns.values().index(j)])
j = j+1
return remove
#sup_features(support, "uni")
def feature_selection(clf, features, labels, domain):
none = features
#print none[0]
domain = np_featuriser(features, domain)
#print domain[0]
clf = Pipeline([('feature_selection',SelectPercentile(f_classif, percentile=20)),
('classification', clf)])
clf.fit(features, labels)
print "\nUnivariate - valuable features \n"
uni = sup_features(clf.named_steps['feature_selection'].get_support(),"uni")
uni = np_featuriser(features, uni)
#print uni[0]
clf = Pipeline([('feature_selection',VarianceThreshold(threshold=(.8 * (1 - .8)))),
('classification', clf)])
clf.fit(features, labels)
print "\nVariance Threshold - removed \n"
v_th = sup_features(clf.named_steps['feature_selection'].get_support(), "vt")
#print v_th[0]
v_th = np_featuriser(features, v_th)
return none, domain, uni, v_th
#clf = GaussianNB()
#svm = SVC()
#svm.set_params(kernel='linear')
#clf = Pipeline([('feature_selection',VarianceThreshold(threshold=(.8 * (1 - .8)))),
# ('classification', svm)])
#clf.fit(features, labels)
#support = clf.named_steps['feature_selection'].get_support()
#print support
#p = clf.predict(features)
#acc = metrics.accuracy_score(labels,p)
#conf = metrics.confusion_matrix(labels, p)
#print acc
#print conf
domain = [columns["GP"],columns["GS"],columns["MIN"],columns["FG%"],
columns["3P%"],columns["FT%"],columns["PTS"],columns["YR"],columns['3PM'],columns['FTM'],columns['FGM']]
#none, domain, uni, vth = feature_selection(clf, features, labels, domain)
#print none.shape,domain.shape,uni.shape,vth.shape
def cross_val(clf, f, l, name):
print "\nFeature selection: %s" %name
scores = cross_validation.cross_val_score(clf, f, l, cv=10)
print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
def clf_all(clf, features, labels, domain):
none, domain, uni, vth = feature_selection(clf, features, labels, domain)
cross_val(clf, none, labels, "None")
print "Number of features left: %s" %none.shape[1]
cross_val(clf, domain, labels, "Domain")
print "Number of features left: %s" %domain.shape[1]
cross_val(clf, uni, labels, "Univariate")
print "Number of features left: %s" %uni.shape[1]
cross_val(clf, vth, labels, "Variance Threshold")
print "Number of features left: %s" %vth.shape[1]
###Output
_____no_output_____
###Markdown
ALL Results
###Code
#def print_metrics(name, accuracy, conf_matrix):
# print "Feature selection: %s\n" %name
# print "Accuracy score: %s\n" %accuracy
# print "Confusion matrix:"
# print "\n%s" %conf_matrix
# print"\n"
#def clf(clf, tr, tr_labels, val, val_labels):
# clf.fit(tr, tr_labels)
#
# pred = clf.predict(val)
#
# acc = metrics.accuracy_score(val_labels,pred)
# conf = metrics.confusion_matrix(val_labels, pred)
# return acc, conf
#def clf_all(CLF, l_none, l_domain, l_uni, l_vt, train_all, test_all, val_all):
# tr_none, ts_none, val_none = np_featuriser(train_all, test_all, val_all, l_none)
# print tr_none.shape
# tr_domain, ts_domain, val_domain = np_featuriser(train_all, test_all, val_all, l_domain)
#
# tr_uni, ts_uni, val_uni = np_featuriser(train_all, test_all, val_all, l_uni)
#
# tr_vt, ts_vt, val_vt = np_featuriser(train_all, test_all, val_all, l_vt)
#clfnb = GaussianNB()
#print "Naive Bayes\n"
# acc_none, conf_none = clf(CLF, tr_none, train_labels, val_none, test_labels)
# print_metrics("None", acc_none, conf_none)
# acc_domain, conf_domain = clf(CLF, tr_domain, train_labels, val_domain, val_labels)
# print_metrics("Domain knowledge", acc_domain, conf_domain)
# acc_uni, conf_uni = clf(CLF, tr_uni, train_labels, val_uni, val_labels)
# print_metrics("Univariate", acc_uni, conf_uni)
# acc_vt, conf_vt = clf(CLF, tr_vt, train_labels, val_vt, val_labels)
# print_metrics("Variance Threshold", acc_vt, conf_vt)
###Output
_____no_output_____
###Markdown
Naive Bayes
###Code
clf_all(GaussianNB(), features, labels, domain)
###Output
Univariate - valuable features
12. feature name: OFF
15. feature name: AST
17. feature name: BLK
22. feature name: W
23. feature name: H
Variance Threshold - removed
5. feature name: FG%
8. feature name: 3P%
11. feature name: FT%
Feature selection: None
Accuracy: 0.81 (+/- 0.08)
Number of features left: 24
Feature selection: Domain
Accuracy: 0.87 (+/- 0.07)
Number of features left: 13
Feature selection: Univariate
Accuracy: 0.89 (+/- 0.04)
Number of features left: 5
Feature selection: Variance Threshold
Accuracy: 0.82 (+/- 0.07)
Number of features left: 21
###Markdown
SVM
###Code
svm = SVC()
svm = svm.set_params(kernel='linear')
clf_all(svm, features, labels, domain)
###Output
Univariate - valuable features
12. feature name: OFF
15. feature name: AST
17. feature name: BLK
22. feature name: W
23. feature name: H
Variance Threshold - removed
5. feature name: FG%
8. feature name: 3P%
11. feature name: FT%
Feature selection: None
Accuracy: 0.90 (+/- 0.04)
Number of features left: 24
Feature selection: Domain
Accuracy: 0.91 (+/- 0.05)
Number of features left: 13
Feature selection: Univariate
Accuracy: 0.91 (+/- 0.04)
Number of features left: 5
Feature selection: Variance Threshold
Accuracy: 0.90 (+/- 0.04)
Number of features left: 21
###Markdown
Logistic Regression
###Code
logreg = linear_model.LogisticRegression(C=1e5)
clf_all(logreg, features, labels, domain)
###Output
Univariate - valuable features
12. feature name: OFF
15. feature name: AST
17. feature name: BLK
22. feature name: W
23. feature name: H
Variance Threshold - removed
5. feature name: FG%
8. feature name: 3P%
11. feature name: FT%
Feature selection: None
Accuracy: 0.89 (+/- 0.06)
Number of features left: 24
Feature selection: Domain
Accuracy: 0.90 (+/- 0.06)
Number of features left: 13
Feature selection: Univariate
Accuracy: 0.89 (+/- 0.06)
Number of features left: 5
Feature selection: Variance Threshold
Accuracy: 0.90 (+/- 0.05)
Number of features left: 21
|
GAN_DDP.ipynb | ###Markdown
###Code
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import torchvision.models as models
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import copy
%matplotlib inline
manual_seed = 999
print("Random Seed: ", manual_seed)
random.seed(manual_seed)
torch.manual_seed(manual_seed)
# location of folder
dataroot = "drive/My Drive/new_images"
# number of workers
workers = 4
# batch size for training
batch_size = 30
# image size for input
image_size = 128
# number of channels (1 for BW, 3 for RGB)
nc = 3
# Size of z latent vector (i.e. size of generator input)
nz = 16
# Size of feature maps in generator
ngf = 128
# Size of feature maps in discriminator
ndf = 128
# Number of training epochs
num_epochs = 50
# Learning rate for optimizers
lr = 0.0005
# Beta values hyperparam for Adam optimizers
beta1 = 0.5
beta2 = 0.99
# Number of GPUs available (set to 0 to run on CPU)
ngpu = 1
print(torch.cuda.get_device_name(torch.cuda.current_device()))
# setup dataset and data loader
dataset = dset.ImageFolder(root=dataroot, transform=transforms.Compose([transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.Grayscale(num_output_channels=3),
transforms.ToTensor(),
transforms.Normalize((0.5,),(0.5,))]))
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True, num_workers=workers)
device = torch.device("cuda:0" if(torch.cuda.is_available() and ngpu>0) else "cpu")
# define weights for layer and normalisation
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv')!=-1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm')!=-1:
nn.init.normal_(m.weight, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
class Generator(nn.Module):
def __init__(self, ngpu):
super(Generator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input z into convolution
nn.ConvTranspose2d(nz, ngf*8, 8, 2, 0, bias=False),
nn.BatchNorm2d(ngf*8),
nn.ReLU(True),
# state size: (ngf*8) x 8 x 8
nn.ConvTranspose2d(ngf*8, ngf*4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf*4),
nn.ReLU(True),
# state size: (ngf*4) x 16 x 16
nn.ConvTranspose2d(ngf*4, ngf*2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf*2),
nn.ReLU(True),
# state size: (ngf*2) x 32 x 32
nn.ConvTranspose2d(ngf*2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size: (ngf) x 64 x 64
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size: (nc) x 128 x 128
)
def forward(self, input):
return self.main(input)
# Create the generator
netG = Generator(ngpu).to(device)
# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
netG = nn.DataParallel(netG, list(range(ngpu)))
# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.02.
netG.apply(weights_init)
# Print the model
print(netG)
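# Quick sanity check (a sketch): a small batch of latent vectors of size nz should be
# mapped by the generator to nc x 128 x 128 images, matching the state-size comments
# above; z_check is just an illustrative variable name.
with torch.no_grad():
    z_check = torch.randn(2, nz, 1, 1, device=device)
    print(netG(z_check).shape)  # expected: torch.Size([2, 3, 128, 128])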
class Discriminator(nn.Module):
def __init__(self, ngpu):
super(Discriminator, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input size: (nc) x 128 x 128
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size: (ndf) x 64 x 64
nn.Conv2d(ndf, ndf*2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf*2),
nn.LeakyReLU(0.2, inplace=True),
# state size: (ndf*2) x 32 x 32
nn.Conv2d(ndf*2, ndf*4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf*4),
nn.LeakyReLU(0.2, inplace=True),
# state size: (ndf*4) x 16 x 16
nn.Conv2d(ndf*4, ndf*8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf*8),
nn.LeakyReLU(0.2, inplace=True),
# state size: (ndf*8) x 8 x 8
nn.Conv2d(ndf*8, 1, 8, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
# Create the Discriminator
netD = Discriminator(ngpu).to(device)
# Handle multi-gpu if desired
if (device.type == 'cuda') and (ngpu > 1):
netD = nn.DataParallel(netD, list(range(ngpu)))
# Apply the weights_init function to randomly initialize all weights
# to mean=0, stdev=0.02.
netD.apply(weights_init)
# Print the model
print(netD)
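# Mirror check for the discriminator (a sketch): nc x 128 x 128 inputs should reduce to
# a single sigmoid score per image; x_check is just an illustrative variable name.
with torch.no_grad():
    x_check = torch.randn(2, nc, image_size, image_size, device=device)
    print(netD(x_check).shape)  # expected: torch.Size([2, 1, 1, 1])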
# Initialize BCELoss function
criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(128, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
real_label = 1.
fake_label = 0.
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, beta2))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, beta2))
# Training Loop
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
alpha = 10
print("Starting Training Loop...")
# For each epoch
for epoch in tqdm(range(num_epochs)):
# For each batch in the dataloader
for i, data in enumerate(dataloader):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
## Train with all-real batch
netD.zero_grad()
# Format batch
real_cpu = data[0].to(device)
b_size = real_cpu.size(0)
label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
# Forward pass real batch through D
output = netD(real_cpu).view(-1)
# Calculate loss on all-real batch
errD_real = criterion(output, label)
# Calculate gradients for D in backward pass
errD_real.backward()
D_x = output.mean().item()
## Train with all-fake batch
# Generate batch of latent vectors
noise = torch.randn(b_size, nz, 1, 1, device=device)
# Generate fake image batch with G
fake = netG(noise)
label.fill_(fake_label)
# Classify all fake batch with D
output = netD(fake.detach()).view(-1)
# Calculate D's loss on the all-fake batch
errD_fake = criterion(output, label)
# Calculate the gradients for this batch
errD_fake.backward()
D_G_z1 = output.mean().item()
# Add the gradients from the all-real and all-fake batches
errD = errD_real + errD_fake
# Update D
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
label.fill_(real_label) # fake labels are real for generator cost
# Since we just updated D, perform another forward pass of all-fake batch through D
output = netD(fake).view(-1)
# Calculate G's loss based on this output
errG = criterion(output, label)
# Calculate gradients for G
errG.backward()
D_G_z2 = output.mean().item()
# Update G
optimizerG.step()
# Output training stats
if i % 60 == 0:
print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
% (epoch, num_epochs, i, len(dataloader),
errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))
# Save Losses for plotting later
G_losses.append(errG.item())
D_losses.append(errD.item())
# Check how the generator is doing by saving G's output on fixed_noise
if (iters % 500 == 0) or ((epoch == num_epochs-1) and (i == len(dataloader)-1)):
with torch.no_grad():
fake = netG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
plt.figure(figsize=(10,5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses,label="G")
plt.plot(D_losses,label="D")
plt.xlabel("iterations")
plt.ylabel("Loss")
plt.legend()
plt.show()
# Grab a batch of real images from the dataloader
real_batch = next(iter(dataloader))
# Plot the real images
plt.figure(figsize=(15,15))
plt.subplot(1,2,1)
plt.axis("off")
plt.title("Real Images")
plt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(),(1,2,0)))
# Plot the fake images from the last epoch
plt.subplot(1,2,2)
plt.axis("off")
plt.title("Fake Images")
plt.imshow(np.transpose(img_list[-1],(1,2,0)))
plt.show()
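# Optionally persist the trained weights so the models can be reloaded without
# retraining; the file names here are placeholders, not paths used elsewhere.
torch.save(netG.state_dict(), 'netG_ddp.pth')
torch.save(netD.state_dict(), 'netD_ddp.pth')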
###Output
_____no_output_____ |
solenoids/solenoid.ipynb | ###Markdown
On-Axis Field of a Finite Solenoid *This formula uses the formula for the field due to a [thin shell solenoid](../solenoids/thin_solenoid.html), integrated over a range of radii to obtain the magnetic field at any point on the axis of a finite solenoid.*  General Case $B = \frac {\mu_o i n}{2 (r_2 - r_1)} \left [ x_2 \ln \left ( \frac {\sqrt{r_2^2 + x_2^2} + r_2}{\sqrt{r_1^2 + x_2^2} + r_1} \right ) - x_1 \ln \left ( \frac {\sqrt{r_2^2 + x_1^2} + r_2}{\sqrt{r_1^2 + x_1^2} + r_1} \right ) \right ]$**B** is the magnetic field, in teslas, at any point on the axis of the solenoid. The direction of the field is parallel to the solenoid axis.$\mathbf \mu_o$ is the permeability constant (1.26x10-6 Hm-1)**i** is the current in the wire, in amperes.**n** is the number of turns of wire *per unit length* in the solenoid.**r1** is the inner radius of the solenoid, in meters.**r2** is the outer radius of the solenoid, in meters.**x1** and **x2** are the distances, on axis, from the ends of the solenoid to the magnetic field measurement point, in meters. The "G Factor" The field can be expressed in a form that separates the unit system, power and winding configuration from the unitless geometry of the coil. This introduces the "G Factor":$B = \mu_o G \sqrt \frac {P \lambda} {r_1 \rho}$where **G** is the unitless geometry factor defined as:$G = \sqrt{\frac {1}{8 \pi \beta (\alpha^2 - 1)}} \left [ (\gamma + \beta) \ln \left ( \frac {\alpha + \sqrt{\alpha^2 + (\gamma + \beta)^2}}{1 + \sqrt{1 + (\gamma + \beta)^2}} \right ) - (\gamma - \beta) \ln \left ( \frac {\alpha + \sqrt{\alpha^2 + (\gamma - \beta)^2}}{1 + \sqrt{1 + (\gamma - \beta)^2}} \right ) \right ]$where,$\alpha = \frac {r_2}{r_1}$, $\beta = \frac l {2 r_1}$ and $\gamma = \frac {x_1 + x_2}{2 r_1}$**P** is the total power consumed by the coil, in watts.**$\lambda$** is equal to the total conductor cross section area divided by the total coil cross section area, which ranges from 0.6 to 0.8 in typical coils.**$\rho$** is the conductor resistivity, in units of ohms-length. The length units must match those of **r1**. Special Case: *x1* = -*x2* When the magnetic field measurement point is at the center of the solenoid:$B = \frac {\mu_o i N}{2(r_2 - r_1)} \ln \left ( \frac {\sqrt{r_2^2 + (\frac l 2)^2} + r_2}{\sqrt{r_1^2 + (\frac l 2)^2} + r_1} \right )$or...$B = \frac {\mu_o j l}{2} \ln \left ( \frac {\sqrt{r_2^2 + (\frac l 2)^2} + r_2}{\sqrt{r_1^2 + (\frac l 2)^2} + r_1} \right )$**j** is the bulk current density in the coil cross section, in amperes per square meter.**l** is the length of the solenoid, in meters.**N** is the total number of turns of wire in the coil.The unitless geometry factor G is simply:$G = \sqrt \frac {\beta} {2 \pi (\alpha^2 - 1)} \ln \left ( \frac {\alpha + \sqrt{\alpha^2 + \beta^2}} {1 + \sqrt{1 + \beta^2}} \right )$Note that **G** is maximum when $\alpha=3$ and $\beta=2$. A coil built with a given inner diameter and input power will deliver the highest central field strength when these conditions are met. Code Example The following Python code shows how to use these formulas to calculate magnetic fields.
###Code
%matplotlib inline
from scipy.special import ellipk, ellipe, ellipkm1
from numpy import pi, sqrt, linspace, log
from pylab import plot, xlabel, ylabel, suptitle, legend, show
uo = 4E-7*pi # Permeability constant - units of H/m
# Compute G Factor from unitless parameters
def GFactorUnitless(a, b, g=0.0): # alpha, beta - omit gamma for central
gpb2 = (g+b)*(g+b)
gmb2 = (g-b)*(g-b)
if not g == 0.0:
sq = sqrt(1/(8*pi*b*(a*a-1)))
t1 = (g+b)*log((a+sqrt(a*a+gpb2))/(1+sqrt(1+gpb2)))
t2 = (g-b)*log((a+sqrt(a*a+gmb2))/(1+sqrt(1+gmb2)))
B = sq*(t1-t2)
else:
sq = sqrt(b/2/pi/(a*a-1))
B = sq*log((a+sqrt(a*a+b*b))/(1+sqrt(1+b*b)))
return B
# Compute G Factor from all dimensions
def GFactor(r1, r2, l, x1=0.0, x2=0.0): # omit x1, x2 to compute central field
a = r2/r1
b = l/2/r1
g = (x1+x2)/2/r1
return GFactorUnitless(a, b, g)
# Compute B field on axis from unitless dimensions
def BFieldUnitless(power, packing, resistivity, r1, a, b, g=0.0):
return uo*GFactorUnitless(a, b, g)*sqrt(power*packing/r1/resistivity)
# Compute B field on axis from actual dimensions (x is measurement point - center if none)
def BField(power, packing, resistivity, r1, r2, length, x=0.0):
a = r2/r1
b = length/2/r1
g = x/r1
return BFieldUnitless(power, packing, resistivity, r1, a, b, g)
###Output
_____no_output_____
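###Markdown
As a quick cross-check of the special-case ($x_1 = -x_2$) expression above, the sketch below evaluates the central field directly from the current-and-turns form of the formula; the current, turn count and coil dimensions are illustrative values only.
###Code
# Illustrative check of the special-case (x1 = -x2) central-field formula (assumed coil values)
i_coil = 10.0      # current in amperes (assumed)
N_turns = 1000     # total number of turns (assumed)
r1_c, r2_c, l_c = 0.0125, 0.0375, 0.05  # inner radius, outer radius, length in meters (assumed)
B_center = (uo * i_coil * N_turns / (2 * (r2_c - r1_c)) *
            log((sqrt(r2_c**2 + (l_c / 2)**2) + r2_c) /
                (sqrt(r1_c**2 + (l_c / 2)**2) + r1_c)))
print("Central field: {:.3} T".format(B_center))
###Output
_____no_output_____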
###Markdown
Now let's apply the `BFieldUnitless` function to a typical coil. We'll assume copper (at resistivity of 1.68x10-8 ohm-m) conductors at a packing density of 0.75, inner radius of 1.25 cm, power of 100 W and with supposedly optimal $\alpha$ and $\beta$ of 3 and 2, respectively:
###Code
resistivity = 1.68E-8 # ohm-meter
r1 = 0.0125 # meter
packing = 0.75
power = 100.0 # watts
B = BFieldUnitless(power, packing, resistivity, r1, 3, 2)
print("B Field: {:.3} T".format(B))
###Output
B Field: 0.107 T
###Markdown
Now try any combination of factors (assuming packing of 0.75 and standard copper conductors) to compute the field:
###Code
from ipywidgets import interactive
from IPython.display import display
def B(power, r1, r2, length, x):
return "{:.3} T".format(BField(power, 0.75, resistivity, r1, r2, length, x))
v = interactive(B,
power=(0.0, 200.0, 1),
r1 = (0.01, 0.1, 0.001),
r2 = (0.02, 0.5, 0.001),
length = (0.01, 2, 0.01),
x = (0.0, 4, 0.01))
display(v)
###Output
_____no_output_____
###Markdown
For a given inner radius, power and winding configuration, the field strength is directly proportional to G. Therefore, we can test the assertion that G is maximum when $\alpha$ is 3 and $\beta$ is 2 by constructing a map of G as a function of $\alpha$ and $\beta$:
###Code
from pylab import pcolor, colorbar, meshgrid, contour
from numpy import arange
a = arange(1.1, 6.0, 0.1)
b = arange(0.1, 4.0, 0.1)
A, B = meshgrid(a,b)
G = GFactorUnitless(A, B)
contour(A, B, G, 30)
colorbar()
xlabel("Unitless parameter, Alpha")
ylabel("Unitless parameter, Beta")
suptitle("Electromagnet 'G Factor'")
show()
print("G Factor at A=3, B=2: {:.3}".format(GFactorUnitless(3,2)))
print("G Factor at A=3, B=1.9: {:.3}".format(GFactorUnitless(3,1.9)))
###Output
_____no_output_____
###Markdown
Although it is apparent that the maximum G Factor occurs *near* the $\alpha=3$, $\beta=2$ point, it is not exactly so:
###Code
from scipy.optimize import minimize
def GMin(AB):
return -GFactorUnitless(AB[0], AB[1])
res = minimize(GMin, [3, 2])
print("G Factor is maximum at Alpha = {:.4}, Beta = {:.4}".format(*res.x))
###Output
G Factor is maximum at Alpha = 3.096, Beta = 1.862
|
scikitlearn/Ex_Files_ML_SciKit_Learn/Exercise Files/02_04_Train_Test_Split.ipynb | ###Markdown
A goal of supervised learning is to build a model that performs well on new data. If you have new data, you could see how your model performs on it. The problem is that you may not have new data, but you can simulate this experience with a train test split. In this video, I'll show you how train test split works in Scikit-Learn. What is `train_test_split` 1. Split the dataset into two pieces: a **training set** and a **testing set**. Typically, about 75% of the data goes to your training set and 25% goes to your test set. 2. Train the model on the **training set**.3. Test the model on the **testing set** and evaluate the performance Import Libraries
###Code
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
###Output
_____no_output_____
###Markdown
Load the Dataset The Boston house-price dataset is one of the datasets that ship with scikit-learn, so it does not require downloading any file from an external website. The code below loads the Boston dataset.
###Code
data = load_boston()
df = pd.DataFrame(data.data, columns=data.feature_names)
df['target'] = data.target
df.head()
X = df.loc[:, ['RM', 'LSTAT', 'PTRATIO']].values
y = df.loc[:, 'target'].values
###Output
_____no_output_____
###Markdown
Train Test Split The colors in the image indicate which variable (X_train, X_test, y_train, y_test) the data from the dataframe df went to for a particular train test split (not necessarily the exact split of the code below).
###Code
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=3)
###Output
_____no_output_____
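###Markdown
A quick check of the split proportions mentioned above: with the default settings, `train_test_split` holds out 25% of the rows for testing, which the shapes below should confirm (assuming the variables from the previous cell).
###Code
# Inspect the split sizes (default test_size is 0.25)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
###Output
_____no_output_____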
###Markdown
Linear Regression Model
###Code
# Make a linear regression instance
reg = LinearRegression(fit_intercept=True)
# Train the model on the training set.
reg.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Measuring Model PerformanceBy measuring model performance on the test set, you can estimate how well your model is likely to perform on new data (out-of-sample data)
###Code
# Test the model on the testing set and evaluate the performance
score = reg.score(X_test, y_test)
print(score)
###Output
_____no_output_____ |
_site/markdown_generator/PubsFromBib.ipynb | ###Markdown
Publications markdown generator for academicpagesTakes a set of bibtex of publications and converts them for use with [academicpages.github.io](academicpages.github.io). This is an interactive Jupyter notebook ([see more info here](http://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/what_is_jupyter.html)). The core python code is also in `pubsFromBibs.py`. Run either from the `markdown_generator` folder after replacing updating the publist dictionary with:* bib file names* specific venue keys based on your bib file preferences* any specific pre-text for specific files* Collection Name (future feature)TODO: Make this work with other databases of citations, TODO: Merge this with the existing TSV parsing solution
###Code
from pybtex.database.input import bibtex
import pybtex.database.input.bibtex
from time import strptime
import string
import html
import os
import re
#todo: incorporate different collection types rather than a catch all publications, requires other changes to template
publist = {
"proceeding": {
"file" : "proceedings.bib",
"venuekey": "booktitle",
"venue-pretext": "In the proceedings of ",
"collection" : {"name":"publications",
"permalink":"/publication/"}
},
"journal":{
"file": "publications.bib",
"venuekey" : "journal",
"venue-pretext" : "",
"collection" : {"name":"publications",
"permalink":"/publication/"}
}
}
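# The parsing loop below expects BibTeX entries shaped roughly like this made-up example
# (title, author and year are required; month, day, url and note are optional; the venue
# comes from the "venuekey" configured above, e.g. "journal" for publications.bib):
#
#   @article{example2020,
#     title   = {An Example Paper Title},
#     author  = {Last, First and Other, Author},
#     journal = {Journal of Examples},
#     year    = {2020},
#     month   = {March},
#     url     = {http://example.com/paper.pdf},
#     note    = {One-line description used as the page excerpt.}
#   }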
html_escape_table = {
"&": "&",
'"': """,
"'": "'"
}
def html_escape(text):
"""Produce entities within text."""
return "".join(html_escape_table.get(c,c) for c in text)
for pubsource in publist:
parser = bibtex.Parser()
bibdata = parser.parse_file(publist[pubsource]["file"])
#loop through the individual references in a given bibtex file
for bib_id in bibdata.entries:
#reset default date
pub_year = "1900"
pub_month = "01"
pub_day = "01"
b = bibdata.entries[bib_id].fields
try:
pub_year = f'{b["year"]}'
#todo: this hack for month and day needs some cleanup
if "month" in b.keys():
if(len(b["month"])<3):
pub_month = "0"+b["month"]
pub_month = pub_month[-2:]
elif(b["month"] not in range(12)):
tmnth = strptime(b["month"][:3],'%b').tm_mon
pub_month = "{:02d}".format(tmnth)
else:
pub_month = str(b["month"])
if "day" in b.keys():
pub_day = str(b["day"])
pub_date = pub_year+"-"+pub_month+"-"+pub_day
#strip out {} as needed (some bibtex entries that maintain formatting)
clean_title = b["title"].replace("{", "").replace("}","").replace("\\","").replace(" ","-")
url_slug = re.sub("\\[.*\\]|[^a-zA-Z0-9_-]", "", clean_title)
url_slug = url_slug.replace("--","-")
md_filename = (str(pub_date) + "-" + url_slug + ".md").replace("--","-")
html_filename = (str(pub_date) + "-" + url_slug).replace("--","-")
#Build Citation from text
citation = ""
#citation authors - todo - add highlighting for primary author?
for author in bibdata.entries[bib_id].persons["author"]:
citation = citation+" "+author.first_names[0]+" "+author.last_names[0]+", "
#citation title
citation = citation + "\"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + ".\""
#add venue logic depending on citation type
venue = publist[pubsource]["venue-pretext"]+b[publist[pubsource]["venuekey"]].replace("{", "").replace("}","").replace("\\","")
citation = citation + " " + html_escape(venue)
citation = citation + ", " + pub_year + "."
## YAML variables
md = "---\ntitle: \"" + html_escape(b["title"].replace("{", "").replace("}","").replace("\\","")) + '"\n'
md += """collection: """ + publist[pubsource]["collection"]["name"]
md += """\npermalink: """ + publist[pubsource]["collection"]["permalink"] + html_filename
note = False
if "note" in b.keys():
if len(str(b["note"])) > 5:
md += "\nexcerpt: '" + html_escape(b["note"]) + "'"
note = True
md += "\ndate: " + str(pub_date)
md += "\nvenue: '" + html_escape(venue) + "'"
url = False
if "url" in b.keys():
if len(str(b["url"])) > 5:
md += "\npaperurl: '" + b["url"] + "'"
url = True
md += "\ncitation: '" + html_escape(citation) + "'"
md += "\n---"
## Markdown description for individual page
if note:
md += "\n" + html_escape(b["note"]) + "\n"
if url:
md += "\n[Access paper here](" + b["url"] + "){:target=\"_blank\"}\n"
else:
md += "\nUse [Google Scholar](https://scholar.google.com/scholar?q="+html.escape(clean_title.replace("-","+"))+"){:target=\"_blank\"} for full citation"
md_filename = os.path.basename(md_filename)
with open("../_publications/" + md_filename, 'w') as f:
f.write(md)
print(f'SUCCESSFULLY PARSED {bib_id}: \"', b["title"][:60],"..."*(len(b['title'])>60),"\"")
# field may not exist for a reference
except KeyError as e:
print(f'WARNING Missing Expected Field {e} from entry {bib_id}: \"', b["title"][:30],"..."*(len(b['title'])>30),"\"")
continue
###Output
_____no_output_____ |
logs_demo.ipynb | ###Markdown
- (1) 2109 clusters- (2) 1054 clusters- (4) 527 clusters- (8) 263 clusters- (16) 131 clusters Justin Bieber, Taylor Swift, Ariana Grande
###Code
# 1 artist per cluster
display_recs(['1uNFoZAHBGtllmzznpCI3s', '06HL4z0CvFAxyc27GXpf02', '66CXWjxzNUsdJxJ2JdwvnR'], 1)
# 2 artists per cluster
display_recs(['1uNFoZAHBGtllmzznpCI3s', '06HL4z0CvFAxyc27GXpf02', '66CXWjxzNUsdJxJ2JdwvnR'], 2)
# 4 artists per cluster
display_recs(['1uNFoZAHBGtllmzznpCI3s', '06HL4z0CvFAxyc27GXpf02', '66CXWjxzNUsdJxJ2JdwvnR'], 4)
# 8 artists per cluster
display_recs(['1uNFoZAHBGtllmzznpCI3s', '06HL4z0CvFAxyc27GXpf02', '66CXWjxzNUsdJxJ2JdwvnR'], 8)
# 16 artists per cluster
display_recs(['1uNFoZAHBGtllmzznpCI3s', '06HL4z0CvFAxyc27GXpf02', '66CXWjxzNUsdJxJ2JdwvnR'], 16)
###Output
230 clusters
Justin Bieber
###Markdown
Andrew's Favorite Artists AJR, Quinn XCII, Twenty One Pilots, Billie Eilish, Maroon 5
###Code
# 1 artist per cluster
display_recs(['6s22t5Y3prQHyaHWUN1R1C', '3ApUX1o6oSz321MMECyIYd', '3YQKmKGau1PzlVlkL1iodx',
'6qqNVTkY8uBg9cP3Jd7DAH', '04gDigrS5kc9YWfZHwBETP'], 1)
# 2 artists per cluster
display_recs(['6s22t5Y3prQHyaHWUN1R1C', '3ApUX1o6oSz321MMECyIYd', '3YQKmKGau1PzlVlkL1iodx',
'6qqNVTkY8uBg9cP3Jd7DAH', '04gDigrS5kc9YWfZHwBETP'], 2)
# 4 artist per cluster
display_recs(['6s22t5Y3prQHyaHWUN1R1C', '3ApUX1o6oSz321MMECyIYd', '3YQKmKGau1PzlVlkL1iodx',
'6qqNVTkY8uBg9cP3Jd7DAH', '04gDigrS5kc9YWfZHwBETP'], 4)
# 8 artist per cluster
display_recs(['6s22t5Y3prQHyaHWUN1R1C', '3ApUX1o6oSz321MMECyIYd', '3YQKmKGau1PzlVlkL1iodx',
'6qqNVTkY8uBg9cP3Jd7DAH', '04gDigrS5kc9YWfZHwBETP'], 8)
# 16 artist per cluster
display_recs(['6s22t5Y3prQHyaHWUN1R1C', '3ApUX1o6oSz321MMECyIYd', '3YQKmKGau1PzlVlkL1iodx',
'6qqNVTkY8uBg9cP3Jd7DAH', '04gDigrS5kc9YWfZHwBETP'], 16)
display_recs(['3TVXtAsR1Inumwj472S9r4', '7dGJo4pcD2V6oG8kP0tJRR'], 16)
###Output
230 clusters
Drake
|
write_to_vtk/write_delaunay_mesh_to_vtk.ipynb | ###Markdown
Write all idl quantities to files
###Code
bx_all_planes = wcf.save_idl_quantity_to_unstructured_grids('bx', 'B_x', now,
x_min=-0.032, x_max=0.028,
y_min=-0.022, y_max=0.032,
z_min=0.249, z_max=0.416)
by_all_planes = wcf.save_idl_quantity_to_unstructured_grids('by', 'B_y', now,
x_min=-0.032, x_max=0.028,
y_min=-0.022, y_max=0.032,
z_min=0.249, z_max=0.416)
bz_all_planes = wcf.save_idl_quantity_to_unstructured_grids('bz', 'B_z', now,
x_min=-0.032, x_max=0.028,
y_min=-0.022, y_max=0.032,
z_min=0.249, z_max=0.416)
te_all_planes = wcf.save_idl_quantity_to_unstructured_grids('te', 'T_e', now,
x_min=-0.026, x_max=0.028,
y_min=-0.03, y_max=0.028,
z_min=0.249, z_max=0.416,
bounds=(1e-3, 1e3))
n_all_planes = wcf.save_idl_quantity_to_unstructured_grids('n', 'n', now,
x_min=-0.026, x_max=0.028,
y_min=-0.03, y_max=0.028,
z_min=0.249, z_max=0.416,
bounds=(1e3, 1e22))
n_three_planes = wcf.remove_plane(0.302, n_all_planes)
###Output
_____no_output_____
###Markdown
Normalize Temperature by plane
###Code
(x_min, x_max,
y_min, y_max,
z_min, z_max) = wcf.joint_mach_bdot_tp_extent()
spatial_increment = 0.001
mesh = np.meshgrid(np.linspace(x_min, x_max, np.ceil((x_max-x_min)/spatial_increment)),
np.linspace(y_min, y_max, np.ceil((y_max-y_min)/spatial_increment)),
np.linspace(z_min, z_max, np.ceil((z_max-z_min)/spatial_increment)))
mesh_wo_edges = wcf.remove_edges_mesh([np.array(mesh[0]),
np.array(mesh[1]),
np.array(mesh[2])])
ones = np.ones(mesh_wo_edges[0].shape)
time_point = 0
te_interpolator = te_interpolators[time_point]
n_interpolator = n_interpolators[time_point]
temperature = wcf.scalar_on_mesh(te_interpolator, mesh_wo_edges)
density = wcf.scalar_on_mesh(n_interpolator, mesh_wo_edges)
maxes = np.nanmax(np.nanmax(temperature, axis=0), axis=0)
temperature.shape
maxes.shape
(temperature / maxes[None, None, :]).shape
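# Assigning the broadcast expression above gives the per-plane normalized temperature
# promised by this section's title (a sketch; temperature_normalized is a new name).
temperature_normalized = temperature / maxes[None, None, :]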
###Output
_____no_output_____
###Markdown
Prepare Mach probe data
###Code
timesteps = 250
database = '/home/jensv/rsx/jens_analysis/shots_database/source/shots.db'
table = 'Shots'
z_direction_1, z_direction_2 = 0, 180
y_direction_1, y_direction_2 = 90, 270
angle_signs = {0: 1,
180: -1,
90: -1,
0: 1}
min_spectral_density = 1.6e-8
condition_z_0416 = ("campaigns = 'mach_probe_plane_campaign_1'"
" AND fiducial_pre_crowbar_gyration_spectral_density > "
+ str(min_spectral_density) +
" AND mach_signals_exist = 1"
" AND (mach_orientation = " + str(z_direction_1) +
" OR mach_orientation = " + str(z_direction_2) + ")")
condition_y_0416 = ("campaigns = 'mach_probe_plane_campaign_1'"
" AND fiducial_pre_crowbar_gyration_spectral_density > "
+ str(min_spectral_density) +
" AND mach_signals_exist = 1"
" AND (mach_orientation = " + str(y_direction_1) +
" OR mach_orientation = " + str(y_direction_2) + ")")
cursor, connection = read_from_sql.cursor_with_rows(condition_z_0416,
database,
table)
z_0416_shots = cursor.fetchall()
cursor.close()
connection.close()
cursor, connection = read_from_sql.cursor_with_rows(condition_y_0416,
database,
table)
y_0416_shots = cursor.fetchall()
cursor.close()
connection.close()
condition_z_302 = ("campaigns = 'mach_probe_plane_campaign_2'"
" AND fiducial_pre_crowbar_gyration_spectral_density > "
+ str(min_spectral_density) +
" AND mach_signals_exist = 1"
" AND (mach_orientation = " + str(z_direction_1) +
" OR mach_orientation = " + str(z_direction_2) + ")")
cursor, connection = read_from_sql.cursor_with_rows(condition_z_302,
database,
table)
z_0302_shots = cursor.fetchall()
cursor.close()
connection.close()
mach_z_0416_measurements = ic_to_mach.run_mach_analysis(z_0416_shots,
timesteps,
angle_signs)
mach_y_0416_measurements = ic_to_mach.run_mach_analysis(y_0416_shots,
timesteps,
angle_signs)
mach_z_0302_measurements = ic_to_mach.run_mach_analysis(z_0302_shots,
timesteps,
angle_signs)
mach_z_0416_measurements['delays'] = np.arange(timesteps)
mach_y_0416_measurements['delays'] = np.arange(timesteps)
mach_z_0302_measurements['delays'] = np.arange(timesteps)
mach_z_0416_measurements = struc_3d.average_duplicate_points(mach_z_0416_measurements)
mach_y_0416_measurements = struc_3d.average_duplicate_points(mach_y_0416_measurements)
mach_z_0302_measurements = struc_3d.average_duplicate_points(mach_z_0302_measurements)
mach_y_measurements = {0.416: mach_y_0416_measurements}
mach_z_measurements = {0.302: mach_z_0302_measurements,
0.416: mach_z_0416_measurements}
mach_y_all_planes = wcf.save_quantity_to_unstructured_grids(mach_y_measurements,
'Mach_y', 'Mach_y', '2016-07-26',
planes=[0.416],
x_min=-0.052, x_max=0.052,
y_min=-0.022, y_max=0.032,
z_min=0.249, z_max=0.416,
bounds=(-10, 10))
mach_z_all_planes = wcf.save_quantity_to_unstructured_grids(mach_z_measurements,
'Mach_z', 'Mach_z', '2016-07-26',
planes=[0.302, 0.416],
x_min=-0.032, x_max=0.032,
y_min=-0.022, y_max=0.032,
z_min=0.249, z_max=0.416,
bounds=(-10, 10))
mach_y_all_planes = wcf.remove_nan_points(mach_y_all_planes)
mach_z_all_planes = wcf.remove_nan_points(mach_z_all_planes)
###Output
_____no_output_____
###Markdown
Determine derivatives and write to files
###Code
bx_triangulation, bx_interpolators = wcf.give_delaunay_and_interpolator(bx_all_planes)
by_triangulation, by_interpolators = wcf.give_delaunay_and_interpolator(by_all_planes)
bz_triangulation, bz_interpolators = wcf.give_delaunay_and_interpolator(bz_all_planes)
te_triangulation, te_interpolators = wcf.give_delaunay_and_interpolator(te_all_planes)
n_triangulation, n_interpolators = wcf.give_delaunay_and_interpolator(n_all_planes)
#mach_y_triangulation, mach_y_interpolators = wcf.give_delaunay_and_interpolator(mach_y_all_planes)
#mach_z_triangulation, mach_z_interpolators = wcf.give_delaunay_and_interpolator(mach_z_all_planes)
n_three_triangulation, n_three_interpolators = wcf.give_delaunay_and_interpolator(n_three_planes)
###Output
_____no_output_____
###Markdown
Examine planes
###Code
(x_min, x_max,
y_min, y_max,
z_min, z_max) = wcf.joint_mach_bdot_tp_extent()
spatial_increment = 0.001
mesh = np.meshgrid(np.linspace(x_min, x_max, np.ceil((x_max-x_min)/spatial_increment)),
np.linspace(y_min, y_max, np.ceil((y_max-y_min)/spatial_increment)))
wcf.plot_planes([0.249, 0.302, 0.357, 0.416], mesh, n_three_interpolators[9])
mesh[1].shape
###Output
_____no_output_____
###Markdown
Interpolate Mach number and temperature in the plane and calculate the ion velocity
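###Markdown
In the next cell the ion flow speed is formed from the Mach number and the electron temperature as $u_i = M \sqrt{q_e T_e / m_i}$ (with $T_e$ in eV), applied separately to the $y$ and $z$ Mach-probe measurements to give `u_i_y` and `u_i_z`.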
###Code
(x_min, x_max,
y_min, y_max,
z_min, z_max) = wcf.joint_mach_bdot_tp_extent()
spatial_increment = 0.001
mesh = np.meshgrid(np.linspace(x_min, x_max, np.ceil((x_max-x_min)/spatial_increment)),
np.linspace(y_min, y_max, np.ceil((y_max-y_min)/spatial_increment)),
np.linspace(z_min, z_max, np.ceil((z_max-z_min)/spatial_increment)))
mach_y_interpolator = mach_y_interpolators[0]
mach_z_interpolator = mach_z_interpolators[0]
te_interpolator = te_interpolators[0]
mach_y = wcf.scalar_on_mesh(mach_y_interpolator, mesh[:2])
mach_z = wcf.scalar_on_mesh(mach_z_interpolator, mesh)
te = wcf.scalar_on_mesh(te_interpolator, mesh)
u_i_y = np.sqrt(te*q_e/m_i)*mach_y
u_i_z = np.sqrt(te*q_e/m_i)*mach_z
u_i_y = np.reshape(u_i_y, mesh[0].shape)
u_i_z = np.reshape(u_i_z, mesh[0].shape)
u_i_y = wcf.remove_edges_scalar_quantity_meshes(u_i_y)
u_i_z = wcf.remove_edges_scalar_quantity_meshes(u_i_z)
###Output
_____no_output_____
###Markdown
Fit $\alpha$
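###Markdown
As implemented below, the ion velocity is modelled as $\vec{u}_i = \vec{j}/(q_e n) + \alpha \hat{B}$ (the two pieces computed as `ion_velocity_term_1` and `ion_velocity_term_2`), so $\alpha$ is recovered point-wise on the Mach-probe planes as $\alpha = (u_{i,z} - j_z/(q_e n))/\hat{B}_z$ and then smoothed with the spline fit.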
###Code
alpha = 1
filter_width = 15
(x_min, x_max,
y_min, y_max,
z_min, z_max) = wcf.joint_mach_bdot_tp_extent()
spatial_increment = 0.001
mesh = np.meshgrid(np.linspace(x_min, x_max, np.ceil((x_max-x_min)/spatial_increment)),
np.linspace(y_min, y_max, np.ceil((y_max-y_min)/spatial_increment)),
np.linspace(z_min, z_max, np.ceil((z_max-z_min)/spatial_increment)))
mesh_wo_edges = wcf.remove_edges_mesh([np.array(mesh[0]),
np.array(mesh[1]),
np.array(mesh[2])])
ones = np.ones(mesh_wo_edges[0].shape)
time_point = 200
bx_interpolator = bx_interpolators[time_point]
by_interpolator = by_interpolators[time_point]
bz_interpolator = bz_interpolators[time_point]
te_interpolator = te_interpolators[time_point]
n_interpolator = n_interpolators[time_point]
bx_derivative = wcf.triangulate_derivatives(mesh, bx_triangulation, bx_interpolator,
increment=0.0000001)
bx_derivative = wcf.remove_edges_derivative_meshes(bx_derivative)
by_derivative = wcf.triangulate_derivatives(mesh, by_triangulation, by_interpolator,
increment=0.0000001)
by_derivative = wcf.remove_edges_derivative_meshes(by_derivative)
bz_derivative = wcf.triangulate_derivatives(mesh, bz_triangulation, bz_interpolator,
increment=0.0000001)
bz_derivative = wcf.remove_edges_derivative_meshes(bz_derivative)
current = wcf.current_on_mesh([bx_derivative,
by_derivative,
bz_derivative])
b_field, b_field_norm = wcf.b_field_on_mesh([bx_interpolator,
by_interpolator,
bz_interpolator],
mesh_wo_edges,
bias=2e-2)
temperature = wcf.scalar_on_mesh(te_interpolator, mesh_wo_edges)
density = wcf.scalar_on_mesh(n_interpolator, mesh_wo_edges)
current = np.asarray(current)
density = np.asarray(density)
b_field_norm = np.asarray(b_field_norm)
density = wcf.boxcar_filter_quantity_mesh(density, filter_width)
for direction in xrange(len(current)):
current[direction] = wcf.boxcar_filter_quantity_mesh(current[direction], filter_width)
density_constant = 1e18*np.ones(density.shape)
ion_velocity_term_1 = wcf.calc_ion_velocity_term_1(current, density, q_e)
ion_velocity_term_1_constant_density = wcf.calc_ion_velocity_term_1(current, density_constant, q_e)
ion_velocity_term_2 = wcf.calc_ion_velocity_term_2(b_field_norm, alpha)
ion_vorticity_term_1 = wcf.calc_ion_vorticity_term_1(current, density, q_e, mesh_wo_edges)
ion_vorticity_term_1_constant_density = wcf.calc_ion_vorticity_term_1(current, density_constant, q_e, mesh_wo_edges)
ion_vorticity_term_2 = wcf.calc_ion_vorticity_term_2(b_field_norm, alpha, mesh_wo_edges)
mesh_wo_edges = wcf.remove_edges_mesh([np.array(mesh[0]),
np.array(mesh[1]),
np.array(mesh[2])])
np.allclose(np.reshape(mesh_wo_edges[0][:,:,-1].ravel(), mesh_wo_edges[0][:,:,-1].shape), mesh_wo_edges[0][:,:,-1])
alpha_from_y = (u_i_y - ion_velocity_term_1[1])/ b_field_norm[1]
alpha_from_z = (u_i_z - ion_velocity_term_1[2])/ b_field_norm[2]
alpha_from_y_flattened_z04 = alpha_from_y[:,:,-1].ravel()
alpha_from_z_flattened_z04 = alpha_from_z[:,:,-1].ravel()
points_x = mesh_wo_edges[0][:, :, -1].ravel()
points_y = mesh_wo_edges[1][:, :, -1].ravel()
points_z_z04 = mesh[2][0,0,-1]*np.ones(points_x.shape)
std_z04 = np.expand_dims(np.zeros(points_x.shape), 0)
data_y_z04 = {'a_out': np.expand_dims(alpha_from_y_flattened_z04, 0),
'x_out': points_x,
'y_out': points_y,
'z_out': points_z_z04,
'std': std_z04
}
data_z_z04 = {'a_out': np.expand_dims(alpha_from_z_flattened_z04, 0),
'x_out': points_x,
'y_out': points_y,
'z_out': points_z_z04,
'std': std_z04
}
data_y_z04 = wcf.remove_nan_points(data_y_z04)
data_z_z04 = wcf.remove_nan_points(data_z_z04)
alpha_interp_z04 = wcf.fit_z_alphas(data_z_z04, mesh_wo_edges, s=7e10)
alpha_fitted_z04 = np.repeat(np.expand_dims(alpha_interp_z04, 2), density.shape[2], 2)
alpha_from_z_flattened_z03 = alpha_from_z[:,:,53].ravel()
points_x = mesh_wo_edges[0][:, :, -1].ravel()
points_y = mesh_wo_edges[1][:, :, -1].ravel()
points_z_z03 = mesh[2][0,0,53]*np.ones(points_x.shape)
std_z03 = np.expand_dims(np.zeros(points_x.shape), 0)
data_z_z03 = {'a_out': np.expand_dims(alpha_from_z_flattened_z03, 0),
'x_out': points_x,
'y_out': points_y,
'z_out': points_z_z03,
'std': std_z03
}
data_z_z03 = wcf.remove_nan_points(data_z_z03)
alpha_interp_z03 = wcf.fit_z_alphas(data_z_z03, mesh_wo_edges, s=8e10)
alpha_fitted_z03 = np.repeat(np.expand_dims(alpha_interp_z03, 2), density.shape[2], 2)
wcf.plot_spline_data_knots(alpha_interp_z03, mesh_wo_edges[0][:,:,-1], mesh_wo_edges[1][:,:, -1], [], [])
wcf.plot_spline_data_knots(alpha_interp_z04, mesh_wo_edges[0][:,:,-1], mesh_wo_edges[1][:,:, -1], [], [])
u_i_from_alpha_z03 = ion_velocity_term_1[2][:,:,53] + alpha_interp_z03*b_field_norm[2][:,:,53]
u_i_from_alpha_z04 = ion_velocity_term_1[2][:,:,-1] + alpha_interp_z04*b_field_norm[2][:,:,-1]
wcf.plot_spline_data_knots(u_i_from_alpha_z03, mesh_wo_edges[0][:,:,-1], mesh_wo_edges[1][:,:, -1], [], [])
wcf.plot_spline_data_knots(u_i_from_alpha_z04, mesh_wo_edges[0][:,:,-1], mesh_wo_edges[1][:,:, -1], [], [])
wcf.plot_spline_data_knots(ion_velocity_term_1[2][:,:,-1], mesh_wo_edges[0][:,:,-1], mesh_wo_edges[1][:,:, -1], [], [])
m, y0 = wcf.fit_line(0.3, 0.4, alpha_interp_z03, alpha_interp_z04)
wcf.line(m, y0, np.linspace(0.24, 0.4, 100)).shape
now = datetime.now().strftime("%Y-%m-%d-%H-%M")
out_dir = '../output/' + now
try:
os.makedirs(out_dir)
except:
pass
alpha = 1
(x_min, x_max,
y_min, y_max,
z_min, z_max) = wcf.joint_mach_bdot_tp_extent()
spatial_increment = 0.001
mesh = np.meshgrid(np.linspace(x_min, x_max, np.ceil((x_max-x_min)/spatial_increment)),
np.linspace(y_min, y_max, np.ceil((y_max-y_min)/spatial_increment)),
np.linspace(z_min, z_max, np.ceil((z_max-z_min)/spatial_increment)))
mesh_wo_edges = wcf.remove_edges_mesh([np.array(mesh[0]),
np.array(mesh[1]),
np.array(mesh[2])])
ones = np.ones(mesh_wo_edges[0].shape)
print time_point
bx_interpolator = bx_interpolators[time_point]
by_interpolator = by_interpolators[time_point]
bz_interpolator = bz_interpolators[time_point]
te_interpolator = te_interpolators[time_point]
n_interpolator = n_interpolators[time_point]
bx_derivative = wcf.triangulate_derivatives(mesh, bx_triangulation, bx_interpolator,
increment=0.0000001)
bx_derivative = wcf.remove_edges_derivative_meshes(bx_derivative)
by_derivative = wcf.triangulate_derivatives(mesh, by_triangulation, by_interpolator,
increment=0.0000001)
by_derivative = wcf.remove_edges_derivative_meshes(by_derivative)
bz_derivative = wcf.triangulate_derivatives(mesh, bz_triangulation, bz_interpolator,
increment=0.0000001)
bz_derivative = wcf.remove_edges_derivative_meshes(bz_derivative)
current = wcf.current_on_mesh([bx_derivative,
by_derivative,
bz_derivative])
b_field, b_field_norm = wcf.b_field_on_mesh([bx_interpolator,
by_interpolator,
bz_interpolator],
mesh_wo_edges,
bias=2e-2)
temperature = wcf.scalar_on_mesh(te_interpolator, mesh_wo_edges)
density = wcf.scalar_on_mesh(n_interpolator, mesh_wo_edges)
###Output
_____no_output_____
###Markdown
Joint quantities interpolation
###Code
filter_width = 15
now = datetime.now().strftime("%Y-%m-%d-%H-%M")
out_dir = '../output/' + now
try:
os.makedirs(out_dir)
except:
pass
alpha = 1
(x_min, x_max,
y_min, y_max,
z_min, z_max) = wcf.joint_mach_bdot_tp_extent()
spatial_increment = 0.001
mesh = np.meshgrid(np.linspace(x_min, x_max, np.ceil((x_max-x_min)/spatial_increment)),
np.linspace(y_min, y_max, np.ceil((y_max-y_min)/spatial_increment)),
np.linspace(z_min, z_max, np.ceil((z_max-z_min)/spatial_increment)))
mesh_wo_edges = wcf.remove_edges_mesh([np.array(mesh[0]),
np.array(mesh[1]),
np.array(mesh[2])])
ones = np.ones(mesh_wo_edges[0].shape)
quantity_names = ['B_x', 'B_y', 'B_z',
'B_norm_x', 'B_norm_y', 'B_norm_z',
'j_x', 'j_y', 'j_z', 'n', 'Te',
'u_i_term1_x', 'u_i_term1_y', 'u_i_term1_z',
'u_e_norm_x', 'u_e_norm_y', 'u_e_norm_z',
'w_i_term1_x', 'w_i_term1_y', 'w_i_term1_z',
'w_i_term2_x', 'w_i_term2_y', 'w_i_term2_z',
'u_i_term1_x_constant_density', 'u_i_term1_y_constant_density', 'u_i_term1_z_constant_density',
'w_i_term1_x_constant_density', 'w_i_term1_y_constant_density', 'w_i_term1_z_constant_density',
'ones',
'u_e_x_fitted_alpha', 'u_e_y_fitted_alpha', 'u_e_z_fitted_alpha',
'w_i_term2_x_fitted_alpha', 'w_i_term2_y_fitted_alpha', 'w_i_term2_z_fitted_alpha',
'alpha_fitted']
for time_point in xrange(len(bx_interpolators)):
print time_point
bx_interpolator = bx_interpolators[time_point]
by_interpolator = by_interpolators[time_point]
bz_interpolator = bz_interpolators[time_point]
te_interpolator = te_interpolators[time_point]
n_interpolator = n_interpolators[time_point]
bx_derivative = wcf.triangulate_derivatives(mesh, bx_triangulation, bx_interpolator,
increment=0.0000001)
bx_derivative = wcf.remove_edges_derivative_meshes(bx_derivative)
by_derivative = wcf.triangulate_derivatives(mesh, by_triangulation, by_interpolator,
increment=0.0000001)
by_derivative = wcf.remove_edges_derivative_meshes(by_derivative)
bz_derivative = wcf.triangulate_derivatives(mesh, bz_triangulation, bz_interpolator,
increment=0.0000001)
bz_derivative = wcf.remove_edges_derivative_meshes(bz_derivative)
current = wcf.current_on_mesh([bx_derivative,
by_derivative,
bz_derivative])
b_field, b_field_norm = wcf.b_field_on_mesh([bx_interpolator,
by_interpolator,
bz_interpolator],
mesh_wo_edges,
bias=2e-2)
temperature = wcf.scalar_on_mesh(te_interpolator, mesh_wo_edges)
density = wcf.scalar_on_mesh(n_interpolator, mesh_wo_edges)
current = np.asarray(current)
density = np.asarray(density)
b_field_norm = np.asarray(b_field_norm)
density = wcf.boxcar_filter_quantity_mesh(density, filter_width)
for direction in xrange(len(current)):
current[direction] = wcf.boxcar_filter_quantity_mesh(current[direction],
filter_width)
density_constant = 1e18*np.ones(density.shape)
ion_velocity_term_1 = wcf.calc_ion_velocity_term_1(current, density, q_e)
ion_velocity_term_1_constant_density = wcf.calc_ion_velocity_term_1(current,
density_constant, q_e)
mach_y_interpolator = mach_y_interpolators[time_point]
mach_z_interpolator = mach_z_interpolators[time_point]
mach_y = wcf.scalar_on_mesh(mach_y_interpolator, mesh_wo_edges[:2])
mach_z = wcf.scalar_on_mesh(mach_z_interpolator, mesh_wo_edges)
te = wcf.scalar_on_mesh(te_interpolator, mesh_wo_edges)
u_i_y = np.sqrt(te*q_e/m_i)*mach_y
u_i_z = np.sqrt(te*q_e/m_i)*mach_z
u_i_y = np.reshape(u_i_y, mesh_wo_edges[0].shape)
u_i_z = np.reshape(u_i_z, mesh_wo_edges[0].shape)
alpha_from_y = (u_i_y - ion_velocity_term_1[1])/ b_field_norm[1]
alpha_from_z = (u_i_z - ion_velocity_term_1[2])/ b_field_norm[2]
alpha_from_y_flattened = alpha_from_y[:,:,-1].ravel()
alpha_from_z_flattened = alpha_from_z[:,:,-1].ravel()
points_x = mesh_wo_edges[0][:, :, -1].ravel()
points_y = mesh_wo_edges[1][:, :, -1].ravel()
points_z = mesh[2][0,0,-1]*np.ones(points_x.shape)
std = np.expand_dims(np.zeros(points_x.shape), 0)
data_y = {'a_out': np.expand_dims(alpha_from_y_flattened, 0),
'x_out': points_x,
'y_out': points_y,
'z_out': points_z,
'std': std
}
data_z = {'a_out': np.expand_dims(alpha_from_z_flattened, 0),
'x_out': points_x,
'y_out': points_y,
'z_out': points_z,
'std': std
}
data_y = wcf.remove_nan_points(data_y)
data_z = wcf.remove_nan_points(data_z)
alpha_interp = wcf.fit_z_alphas(data_z, mesh_wo_edges, s=7e10)
alpha_fitted = np.repeat(np.expand_dims(alpha_interp, 2), density.shape[2], 2)
ion_velocity_term_2 = wcf.calc_ion_velocity_term_2(b_field_norm, alpha)
ion_velocity_term_2_alpha = wcf.calc_ion_velocity_term_2(b_field_norm, alpha_fitted)
ion_vorticity_term_1 = wcf.calc_ion_vorticity_term_1(current, density, q_e, mesh_wo_edges)
ion_vorticity_term_1_constant_density = wcf.calc_ion_vorticity_term_1(current, density_constant,
q_e, mesh_wo_edges)
ion_vorticity_term_2 = wcf.calc_ion_vorticity_term_2(b_field_norm, alpha, mesh_wo_edges)
ion_vorticity_term_2_alpha = wcf.calc_ion_vorticity_term_2(b_field_norm, alpha_fitted, mesh_wo_edges)
for direction in xrange(len(ion_vorticity_term_1)):
ion_vorticity_term_1[direction] = wcf.boxcar_filter_quantity_mesh(ion_vorticity_term_1[direction],
filter_width)
ion_vorticity_term_1_constant_density[direction] = wcf.boxcar_filter_quantity_mesh(ion_vorticity_term_1_constant_density[direction],
filter_width)
ion_vorticity_term_2[direction] = wcf.boxcar_filter_quantity_mesh(ion_vorticity_term_2[direction], filter_width)
ion_vorticity_term_2_alpha[direction] = wcf.boxcar_filter_quantity_mesh(ion_vorticity_term_2_alpha[direction],
filter_width)
fields = (list(b_field) + list(b_field_norm) + list(current) +
[density] + [temperature] +
list(ion_velocity_term_1) + list(ion_velocity_term_2) +
list(ion_vorticity_term_1) + list(ion_vorticity_term_2) +
list(ion_velocity_term_1_constant_density) +
list(ion_vorticity_term_1_constant_density) +
[ones] +
list(ion_velocity_term_2_alpha)+
list(ion_vorticity_term_2_alpha) +
[alpha_fitted])
numpy_archive_name = out_dir + '/Bdot_triple_probe_quantities' + str(time_point).zfill(4) + '.npz'
save_to_numpy_mesh(mesh_wo_edges, fields[5:9], quantity_names[5:9], numpy_archive_name)
x, y, z, variables = wcf.prepare_for_rectilinear_grid(mesh_wo_edges, fields,
quantity_names)
wcf.write_fields_and_currents_to_structured_mesh(now, 'Bdot_triple_probe_quantities',
x, y, z, variables, time_point)
print 'density between', density.max(), density.min(), density.mean()
print 'abs density between', np.abs(density).max(), np.abs(density).min(), np.abs(density).mean()
print 'charge', q_e
print 'current x between', current[0].max(), current[0].min(), current[0].mean()
print 'current y between', current[1].max(), current[1].min(), current[1].mean()
print 'current z between', current[2].max(), current[2].min(), current[2].mean()
denominator = density*q_e
print 'denominator between', denominator.max(), denominator.min(), denominator.mean()
print 'abs denominator between', np.abs(denominator).max(), np.abs(denominator).min(), np.abs(denominator).mean()
term = [current[0]/denominator,
current[1]/denominator,
current[2]/denominator]
print 'term x between', term[0].max(), term[0].min(), term[0].mean()
print 'term y between', term[1].max(), term[1].min(), term[1].mean()
print 'term z between', term[2].max(), term[2].min(), term[2].mean()
###Output
density between 7.18761714797e+19 1.26165033215e+17 7.67230104918e+18
abs density between 7.18761714797e+19 1.26165033215e+17 7.67230104918e+18
charge 1.6021766208e-19
current x between 1445823.091 -664716.029921 287.957349552
current y between 1715203.22622 -1893650.19427 -3720.90718255
current z between 3060044.45744 -2740383.91419 -28779.1816356
denominator between 11.5158321537 0.020213866658 1.22923813687
abs denominator between 11.5158321537 0.020213866658 1.22923813687
term x between 13140771.3648 -4042828.16287 1798.77853144
term y between 8944163.4699 -8274999.82743 -4840.150886
term z between 7991128.24954 -7980696.62142 -10766.3934199
|
module4/My_Notes_lesson_regression_classification_4.ipynb | ###Markdown
Lambda School Data Science*Unit 2, Sprint 1, Module 4*--- Regression & Classification, Module 4 (Logistic Regression)- do train/validate/test split- begin with baselines for classification- express and explain the intuition and interpretation of Logistic Regression- use sklearn.linear_model.LogisticRegression to fit and interpret Logistic Regression modelsLogistic regression is the baseline for classification models, as well as a handy way to predict probabilities (since those too live in the unit interval). While relatively simple, it is also the foundation for more sophisticated classification techniques such as neural networks (many of which can effectively be thought of as networks of logistic models). SetupYou can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below).Libraries:- category_encoders 2.0.0- numpy- pandas- scikit-learn
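###Markdown
Before the lesson's setup below, here is a minimal self-contained sketch of that logistic-regression workflow on a tiny synthetic dataset (not the lesson's data); it only assumes scikit-learn and NumPy are available.
###Code
# Minimal logistic regression sketch on synthetic data
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X = np.random.RandomState(0).normal(size=(200, 2))
y = (X[:, 0] + X[:, 1] > 0).astype(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

log_reg = LogisticRegression()
log_reg.fit(X_train, y_train)
print('Test accuracy:', log_reg.score(X_test, y_test))
print('Predicted probabilities (first 3 rows):')
print(log_reg.predict_proba(X_test[:3]))
###Output
_____no_output_____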
###Code
import os, sys
in_colab = 'google.colab' in sys.modules
# If you're in Colab...
if in_colab:
# Pull files from Github repo
os.chdir('/content')
!git init .
!git remote add origin https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification.git
!git pull origin master
# Install required python packages
!pip install -r requirements.txt
# Change into directory for module
os.chdir('module4')
###Output
Initialized empty Git repository in /content/.git/
remote: Enumerating objects: 156, done.[K
remote: Total 156 (delta 0), reused 0 (delta 0), pack-reused 156[K
Receiving objects: 100% (156/156), 19.30 MiB | 19.67 MiB/s, done.
Resolving deltas: 100% (71/71), done.
From https://github.com/LambdaSchool/DS-Unit-2-Regression-Classification
* branch master -> FETCH_HEAD
* [new branch] master -> origin/master
Collecting category_encoders==2.0.0 (from -r requirements.txt (line 1))
[?25l Downloading https://files.pythonhosted.org/packages/6e/a1/f7a22f144f33be78afeb06bfa78478e8284a64263a3c09b1ef54e673841e/category_encoders-2.0.0-py2.py3-none-any.whl (87kB)
[K |████████████████████████████████| 92kB 3.5MB/s
[?25hCollecting eli5==0.10.0 (from -r requirements.txt (line 2))
[?25l Downloading https://files.pythonhosted.org/packages/e6/ea/47bd5844bb609d45821114aa7e0bc9e4422053fe24a6cf6b357f0d3f74d3/eli5-0.10.0-py2.py3-none-any.whl (105kB)
[K |████████████████████████████████| 112kB 8.6MB/s
[?25hRequirement already satisfied: matplotlib!=3.1.1 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 3)) (3.0.3)
Collecting pandas-profiling==2.3.0 (from -r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/2c/2f/aae19e2173c10a9bb7fee5f5cad35dbe53a393960fc91abc477dcc4661e8/pandas-profiling-2.3.0.tar.gz (127kB)
[K |████████████████████████████████| 133kB 42.5MB/s
[?25hCollecting pdpbox==0.2.0 (from -r requirements.txt (line 5))
[?25l Downloading https://files.pythonhosted.org/packages/87/23/ac7da5ba1c6c03a87c412e7e7b6e91a10d6ecf4474906c3e736f93940d49/PDPbox-0.2.0.tar.gz (57.7MB)
[K |████████████████████████████████| 57.7MB 347kB/s
[?25hRequirement already satisfied: plotly==4.1.1 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 6)) (4.1.1)
Requirement already satisfied: seaborn==0.9.0 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 7)) (0.9.0)
Requirement already satisfied: scikit-learn==0.21.3 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 8)) (0.21.3)
Collecting shap==0.29.3 (from -r requirements.txt (line 9))
[?25l Downloading https://files.pythonhosted.org/packages/80/82/bab67238ac27d53214b12f6ed095493dc7b43be07c615b8b0dbb7da33157/shap-0.29.3.tar.gz (230kB)
[K |████████████████████████████████| 235kB 39.4MB/s
[?25hRequirement already satisfied: statsmodels==0.10.1 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 10)) (0.10.1)
Requirement already satisfied: xgboost==0.90 in /usr/local/lib/python3.6/dist-packages (from -r requirements.txt (line 11)) (0.90)
Requirement already satisfied: pandas>=0.21.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0->-r requirements.txt (line 1)) (0.24.2)
Requirement already satisfied: patsy>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0->-r requirements.txt (line 1)) (0.5.1)
Requirement already satisfied: numpy>=1.11.3 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0->-r requirements.txt (line 1)) (1.16.5)
Requirement already satisfied: scipy>=0.19.0 in /usr/local/lib/python3.6/dist-packages (from category_encoders==2.0.0->-r requirements.txt (line 1)) (1.3.1)
Requirement already satisfied: jinja2 in /usr/local/lib/python3.6/dist-packages (from eli5==0.10.0->-r requirements.txt (line 2)) (2.10.1)
Requirement already satisfied: six in /usr/local/lib/python3.6/dist-packages (from eli5==0.10.0->-r requirements.txt (line 2)) (1.12.0)
Requirement already satisfied: graphviz in /usr/local/lib/python3.6/dist-packages (from eli5==0.10.0->-r requirements.txt (line 2)) (0.10.1)
Requirement already satisfied: attrs>16.0.0 in /usr/local/lib/python3.6/dist-packages (from eli5==0.10.0->-r requirements.txt (line 2)) (19.1.0)
Requirement already satisfied: typing in /usr/local/lib/python3.6/dist-packages (from eli5==0.10.0->-r requirements.txt (line 2)) (3.7.4.1)
Requirement already satisfied: tabulate>=0.7.7 in /usr/local/lib/python3.6/dist-packages (from eli5==0.10.0->-r requirements.txt (line 2)) (0.8.5)
Requirement already satisfied: python-dateutil>=2.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.1.1->-r requirements.txt (line 3)) (2.5.3)
Requirement already satisfied: pyparsing!=2.0.4,!=2.1.2,!=2.1.6,>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.1.1->-r requirements.txt (line 3)) (2.4.2)
Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.1.1->-r requirements.txt (line 3)) (1.1.0)
Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.6/dist-packages (from matplotlib!=3.1.1->-r requirements.txt (line 3)) (0.10.0)
Requirement already satisfied: missingno>=0.4.2 in /usr/local/lib/python3.6/dist-packages (from pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.4.2)
Collecting htmlmin>=0.1.12 (from pandas-profiling==2.3.0->-r requirements.txt (line 4))
Downloading https://files.pythonhosted.org/packages/b3/e7/fcd59e12169de19f0131ff2812077f964c6b960e7c09804d30a7bf2ab461/htmlmin-0.1.12.tar.gz
Collecting phik>=0.9.8 (from pandas-profiling==2.3.0->-r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/45/ad/24a16fa4ba612fb96a3c4bb115a5b9741483f53b66d3d3afd987f20fa227/phik-0.9.8-py3-none-any.whl (606kB)
[K |████████████████████████████████| 614kB 39.4MB/s
[?25hCollecting confuse>=1.0.0 (from pandas-profiling==2.3.0->-r requirements.txt (line 4))
Downloading https://files.pythonhosted.org/packages/4c/6f/90e860cba937c174d8b3775729ccc6377eb91f52ad4eeb008e7252a3646d/confuse-1.0.0.tar.gz
Requirement already satisfied: astropy in /usr/local/lib/python3.6/dist-packages (from pandas-profiling==2.3.0->-r requirements.txt (line 4)) (3.0.5)
Requirement already satisfied: joblib in /usr/local/lib/python3.6/dist-packages (from pdpbox==0.2.0->-r requirements.txt (line 5)) (0.13.2)
Requirement already satisfied: psutil in /usr/local/lib/python3.6/dist-packages (from pdpbox==0.2.0->-r requirements.txt (line 5)) (5.4.8)
Requirement already satisfied: retrying>=1.3.3 in /usr/local/lib/python3.6/dist-packages (from plotly==4.1.1->-r requirements.txt (line 6)) (1.3.3)
Requirement already satisfied: tqdm>4.25.0 in /usr/local/lib/python3.6/dist-packages (from shap==0.29.3->-r requirements.txt (line 9)) (4.28.1)
Requirement already satisfied: ipython in /usr/local/lib/python3.6/dist-packages (from shap==0.29.3->-r requirements.txt (line 9)) (5.5.0)
Requirement already satisfied: scikit-image in /usr/local/lib/python3.6/dist-packages (from shap==0.29.3->-r requirements.txt (line 9)) (0.15.0)
Requirement already satisfied: pytz>=2011k in /usr/local/lib/python3.6/dist-packages (from pandas>=0.21.1->category_encoders==2.0.0->-r requirements.txt (line 1)) (2018.9)
Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.6/dist-packages (from jinja2->eli5==0.10.0->-r requirements.txt (line 2)) (1.1.1)
Requirement already satisfied: setuptools in /usr/local/lib/python3.6/dist-packages (from kiwisolver>=1.0.1->matplotlib!=3.1.1->-r requirements.txt (line 3)) (41.2.0)
Requirement already satisfied: numba>=0.38.1 in /usr/local/lib/python3.6/dist-packages (from phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.40.1)
Collecting pytest-pylint>=0.13.0 (from phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
Downloading https://files.pythonhosted.org/packages/64/dc/6f35f114844fb12e38d60c4f3d2441a55baff7043ad4e013777dff55746c/pytest_pylint-0.14.1-py3-none-any.whl
Requirement already satisfied: nbconvert>=5.3.1 in /usr/local/lib/python3.6/dist-packages (from phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (5.6.0)
Requirement already satisfied: jupyter-client>=5.2.3 in /usr/local/lib/python3.6/dist-packages (from phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (5.3.3)
Collecting pytest>=4.0.2 (from phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/9a/46/903ea822d83187bb8b354fcb3d085fb10b7787be39f9cf1628bc6ef8f9c9/pytest-5.2.0-py3-none-any.whl (226kB)
     |████████████████████████████████| 235kB 41.6MB/s
Requirement already satisfied: pyyaml in /usr/local/lib/python3.6/dist-packages (from confuse>=1.0.0->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (3.13)
Requirement already satisfied: pygments in /usr/local/lib/python3.6/dist-packages (from ipython->shap==0.29.3->-r requirements.txt (line 9)) (2.1.3)
Requirement already satisfied: pexpect; sys_platform != "win32" in /usr/local/lib/python3.6/dist-packages (from ipython->shap==0.29.3->-r requirements.txt (line 9)) (4.7.0)
Requirement already satisfied: prompt-toolkit<2.0.0,>=1.0.4 in /usr/local/lib/python3.6/dist-packages (from ipython->shap==0.29.3->-r requirements.txt (line 9)) (1.0.16)
Requirement already satisfied: traitlets>=4.2 in /usr/local/lib/python3.6/dist-packages (from ipython->shap==0.29.3->-r requirements.txt (line 9)) (4.3.2)
Requirement already satisfied: pickleshare in /usr/local/lib/python3.6/dist-packages (from ipython->shap==0.29.3->-r requirements.txt (line 9)) (0.7.5)
Requirement already satisfied: simplegeneric>0.8 in /usr/local/lib/python3.6/dist-packages (from ipython->shap==0.29.3->-r requirements.txt (line 9)) (0.8.1)
Requirement already satisfied: decorator in /usr/local/lib/python3.6/dist-packages (from ipython->shap==0.29.3->-r requirements.txt (line 9)) (4.4.0)
Requirement already satisfied: pillow>=4.3.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->shap==0.29.3->-r requirements.txt (line 9)) (4.3.0)
Requirement already satisfied: PyWavelets>=0.4.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->shap==0.29.3->-r requirements.txt (line 9)) (1.0.3)
Requirement already satisfied: imageio>=2.0.1 in /usr/local/lib/python3.6/dist-packages (from scikit-image->shap==0.29.3->-r requirements.txt (line 9)) (2.4.1)
Requirement already satisfied: networkx>=2.0 in /usr/local/lib/python3.6/dist-packages (from scikit-image->shap==0.29.3->-r requirements.txt (line 9)) (2.3)
Requirement already satisfied: llvmlite>=0.25.0dev0 in /usr/local/lib/python3.6/dist-packages (from numba>=0.38.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.29.0)
Collecting pylint>=1.4.5 (from pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/ef/ed/1cb8e7b85a31807aa0bff8b3e60935370bed7e141df8b530aac6352bddff/pylint-2.4.2-py3-none-any.whl (302kB)
     |████████████████████████████████| 307kB 28.7MB/s
Requirement already satisfied: jupyter-core in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (4.5.0)
Requirement already satisfied: testpath in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.4.2)
Requirement already satisfied: bleach in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (3.1.0)
Requirement already satisfied: mistune<2,>=0.8.1 in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.8.4)
Requirement already satisfied: defusedxml in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.6.0)
Requirement already satisfied: entrypoints>=0.2.2 in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.3)
Requirement already satisfied: pandocfilters>=1.4.1 in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (1.4.2)
Requirement already satisfied: nbformat>=4.4 in /usr/local/lib/python3.6/dist-packages (from nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (4.4.0)
Requirement already satisfied: pyzmq>=13 in /usr/local/lib/python3.6/dist-packages (from jupyter-client>=5.2.3->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (17.0.0)
Requirement already satisfied: tornado>=4.1 in /usr/local/lib/python3.6/dist-packages (from jupyter-client>=5.2.3->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (4.5.3)
Requirement already satisfied: more-itertools>=4.0.0 in /usr/local/lib/python3.6/dist-packages (from pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (7.2.0)
Requirement already satisfied: py>=1.5.0 in /usr/local/lib/python3.6/dist-packages (from pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (1.8.0)
Collecting pluggy<1.0,>=0.12 (from pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
Downloading https://files.pythonhosted.org/packages/92/c7/48439f7d5fd6bddb4c04b850bb862b42e3e2b98570040dfaf68aedd8114b/pluggy-0.13.0-py2.py3-none-any.whl
Requirement already satisfied: atomicwrites>=1.0 in /usr/local/lib/python3.6/dist-packages (from pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (1.3.0)
Requirement already satisfied: packaging in /usr/local/lib/python3.6/dist-packages (from pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (19.2)
Requirement already satisfied: wcwidth in /usr/local/lib/python3.6/dist-packages (from pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.1.7)
Requirement already satisfied: importlib-metadata>=0.12; python_version < "3.8" in /usr/local/lib/python3.6/dist-packages (from pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.23)
Requirement already satisfied: ptyprocess>=0.5 in /usr/local/lib/python3.6/dist-packages (from pexpect; sys_platform != "win32"->ipython->shap==0.29.3->-r requirements.txt (line 9)) (0.6.0)
Requirement already satisfied: ipython-genutils in /usr/local/lib/python3.6/dist-packages (from traitlets>=4.2->ipython->shap==0.29.3->-r requirements.txt (line 9)) (0.2.0)
Requirement already satisfied: olefile in /usr/local/lib/python3.6/dist-packages (from pillow>=4.3.0->scikit-image->shap==0.29.3->-r requirements.txt (line 9)) (0.46)
Collecting isort<5,>=4.2.5 (from pylint>=1.4.5->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/e5/b0/c121fd1fa3419ea9bfd55c7f9c4fedfec5143208d8c7ad3ce3db6c623c21/isort-4.3.21-py2.py3-none-any.whl (42kB)
     |████████████████████████████████| 51kB 18.0MB/s
Collecting mccabe<0.7,>=0.6 (from pylint>=1.4.5->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
Downloading https://files.pythonhosted.org/packages/87/89/479dc97e18549e21354893e4ee4ef36db1d237534982482c3681ee6e7b57/mccabe-0.6.1-py2.py3-none-any.whl
Collecting astroid<2.4,>=2.3.0 (from pylint>=1.4.5->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/13/e1/74a63c85c501c29c52da5be604c025e368f4dd77daf1fa13c878a33e5a36/astroid-2.3.1-py3-none-any.whl (205kB)
     |████████████████████████████████| 215kB 45.5MB/s
Requirement already satisfied: webencodings in /usr/local/lib/python3.6/dist-packages (from bleach->nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.5.1)
Requirement already satisfied: jsonschema!=2.5.0,>=2.4 in /usr/local/lib/python3.6/dist-packages (from nbformat>=4.4->nbconvert>=5.3.1->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (2.6.0)
Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.6/dist-packages (from importlib-metadata>=0.12; python_version < "3.8"->pytest>=4.0.2->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (0.6.0)
Requirement already satisfied: wrapt==1.11.* in /usr/local/lib/python3.6/dist-packages (from astroid<2.4,>=2.3.0->pylint>=1.4.5->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4)) (1.11.2)
Collecting lazy-object-proxy==1.4.* (from astroid<2.4,>=2.3.0->pylint>=1.4.5->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/0e/26/534a6d32572a9dbca11619321535c0a7ab34688545d9d67c2c204b9e3a3d/lazy_object_proxy-1.4.2-cp36-cp36m-manylinux1_x86_64.whl (49kB)
     |████████████████████████████████| 51kB 8.9MB/s
Collecting typed-ast<1.5,>=1.4.0; implementation_name == "cpython" and python_version < "3.8" (from astroid<2.4,>=2.3.0->pylint>=1.4.5->pytest-pylint>=0.13.0->phik>=0.9.8->pandas-profiling==2.3.0->-r requirements.txt (line 4))
[?25l Downloading https://files.pythonhosted.org/packages/31/d3/9d1802c161626d0278bafb1ffb32f76b9d01e123881bbf9d91e8ccf28e18/typed_ast-1.4.0-cp36-cp36m-manylinux1_x86_64.whl (736kB)
     |████████████████████████████████| 737kB 30.4MB/s
Building wheels for collected packages: pandas-profiling, pdpbox, shap, htmlmin, confuse
  Building wheel for pandas-profiling (setup.py) ... done
Created wheel for pandas-profiling: filename=pandas_profiling-2.3.0-py2.py3-none-any.whl size=145035 sha256=a9ba8edebd1d1bd533d7568e3981657805be7995a63792e03ce8d7c0429e33fc
Stored in directory: /root/.cache/pip/wheels/ce/c7/f1/dbfef4848ebb048cb1d4a22d1ed0c62d8ff2523747235e19fe
  Building wheel for pdpbox (setup.py) ... done
Created wheel for pdpbox: filename=PDPbox-0.2.0-cp36-none-any.whl size=57690723 sha256=d3faf38bd10026695db995bd02a1e206381e696cdaef6847e903991dde385f4f
Stored in directory: /root/.cache/pip/wheels/7d/08/51/63fd122b04a2c87d780464eeffb94867c75bd96a64d500a3fe
  Building wheel for shap (setup.py) ... done
Created wheel for shap: filename=shap-0.29.3-cp36-cp36m-linux_x86_64.whl size=344720 sha256=2f982d0042efc34c080553bab81ea1a4f0489e04a1764492fa2c041d0f22343a
Stored in directory: /root/.cache/pip/wheels/00/20/87/d199e4d7397997f5494e4098104f91313ac8120753bee7b032
  Building wheel for htmlmin (setup.py) ... done
Created wheel for htmlmin: filename=htmlmin-0.1.12-cp36-none-any.whl size=27084 sha256=8c45241f3349c9bfb213711efd99eb6b546044d6dc613a8786d420e5efbec13c
Stored in directory: /root/.cache/pip/wheels/43/07/ac/7c5a9d708d65247ac1f94066cf1db075540b85716c30255459
  Building wheel for confuse (setup.py) ... done
Created wheel for confuse: filename=confuse-1.0.0-cp36-none-any.whl size=17486 sha256=86b506fb7719f5a537fc75d6d0a4b41f50003ad623efa227b339469b183d5c14
Stored in directory: /root/.cache/pip/wheels/b0/b2/96/2074eee7dbf7b7df69d004c9b6ac4e32dad04fb7666cf943bd
Successfully built pandas-profiling pdpbox shap htmlmin confuse
ERROR: datascience 0.10.6 has requirement folium==0.2.1, but you'll have folium 0.8.3 which is incompatible.
Installing collected packages: category-encoders, eli5, htmlmin, isort, mccabe, lazy-object-proxy, typed-ast, astroid, pylint, pluggy, pytest, pytest-pylint, phik, confuse, pandas-profiling, pdpbox, shap
Found existing installation: pluggy 0.7.1
Uninstalling pluggy-0.7.1:
Successfully uninstalled pluggy-0.7.1
Found existing installation: pytest 3.6.4
Uninstalling pytest-3.6.4:
Successfully uninstalled pytest-3.6.4
Found existing installation: pandas-profiling 1.4.1
Uninstalling pandas-profiling-1.4.1:
Successfully uninstalled pandas-profiling-1.4.1
Successfully installed astroid-2.3.1 category-encoders-2.0.0 confuse-1.0.0 eli5-0.10.0 htmlmin-0.1.12 isort-4.3.21 lazy-object-proxy-1.4.2 mccabe-0.6.1 pandas-profiling-2.3.0 pdpbox-0.2.0 phik-0.9.8 pluggy-0.13.0 pylint-2.4.2 pytest-5.2.0 pytest-pylint-0.14.1 shap-0.29.3 typed-ast-1.4.0
###Markdown
Do train/validate/test split Overview Predict Titanic survival 🚢Kaggle is a platform for machine learning competitions. [Kaggle has used the Titanic dataset](https://www.kaggle.com/c/titanic/data) for their most popular "getting started" competition. Kaggle splits the data into train and test sets for participants. Let's load both:
###Code
import pandas as pd
train = pd.read_csv('../data/titanic/train.csv')
test = pd.read_csv('../data/titanic/test.csv')
###Output
_____no_output_____
###Markdown
Notice that the train set has one more column than the test set:
###Code
train.shape, test.shape
###Output
_____no_output_____
###Markdown
Which column is in train but not test? The target!
###Code
set(train.columns) - set(test.columns)
###Output
_____no_output_____
###Markdown
Why doesn't Kaggle give you the target for the test set? Rachel Thomas, [How (and why) to create a good validation set](https://www.fast.ai/2017/11/13/validation-sets/)> One great thing about Kaggle competitions is that they force you to think about validation sets more rigorously (in order to do well). For those who are new to Kaggle, it is a platform that hosts machine learning competitions. Kaggle typically breaks the data into two sets you can download:>> 1. a **training set**, which includes the _independent variables,_ as well as the _dependent variable_ (what you are trying to predict).>> 2. a **test set**, which just has the _independent variables._ You will make predictions for the test set, which you can submit to Kaggle and get back a score of how well you did.>> This is the basic idea needed to get started with machine learning, but to do well, there is a bit more complexity to understand. **You will want to create your own training and validation sets (by splitting the Kaggle “training” data). You will just use your smaller training set (a subset of Kaggle’s training data) for building your model, and you can evaluate it on your validation set (also a subset of Kaggle’s training data) before you submit to Kaggle.**>> The most important reason for this is that Kaggle has split the test data into two sets: for the public and private leaderboards. The score you see on the public leaderboard is just for a subset of your predictions (and you don’t know which subset!). How your predictions fare on the private leaderboard won’t be revealed until the end of the competition. The reason this is important is that you could end up overfitting to the public leaderboard and you wouldn’t realize it until the very end when you did poorly on the private leaderboard. Using a good validation set can prevent this. You can check if your validation set is any good by seeing if your model has similar scores on it to compared with on the Kaggle test set. ...>> Understanding these distinctions is not just useful for Kaggle. In any predictive machine learning project, you want your model to be able to perform well on new data. 2-way train/test split is not enough Hastie, Tibshirani, and Friedman, [The Elements of Statistical Learning](http://statweb.stanford.edu/~tibs/ElemStatLearn/), Chapter 7: Model Assessment and Selection> If we are in a data-rich situation, the best approach is to randomly divide the dataset into three parts: a training set, a validation set, and a test set. The training set is used to fit the models; the validation set is used to estimate prediction error for model selection; the test set is used for assessment of the generalization error of the final chosen model. Ideally, the test set should be kept in a "vault," and be brought out only at the end of the data analysis. Suppose instead that we use the test-set repeatedly, choosing the model with the smallest test-set error. Then the test set error of the final chosen model will underestimate the true test error, sometimes substantially. Andreas Mueller and Sarah Guido, [Introduction to Machine Learning with Python](https://books.google.com/books?id=1-4lDQAAQBAJ&pg=PA270)> The distinction between the training set, validation set, and test set is fundamentally important to applying machine learning methods in practice. Any choices made based on the test set accuracy "leak" information from the test set into the model. Therefore, it is important to keep a separate test set, which is only used for the final evaluation. 
It is good practice to do all exploratory analysis and model selection using the combination of a training and a validation set, and reserve the test set for a final evaluation - this is even true for exploratory visualization. Strictly speaking, evaluating more than one model on the test set and choosing the better of the two will result in an overly optimistic estimate of how accurate the model is. Hadley Wickham, [R for Data Science](https://r4ds.had.co.nz/model-intro.htmlhypothesis-generation-vs.hypothesis-confirmation)> There is a pair of ideas that you must understand in order to do inference correctly:>> 1. Each observation can either be used for exploration or confirmation, not both.>> 2. You can use an observation as many times as you like for exploration, but you can only use it once for confirmation. As soon as you use an observation twice, you’ve switched from confirmation to exploration.>> This is necessary because to confirm a hypothesis you must use data independent of the data that you used to generate the hypothesis. Otherwise you will be over optimistic. There is absolutely nothing wrong with exploration, but you should never sell an exploratory analysis as a confirmatory analysis because it is fundamentally misleading.>> If you are serious about doing an confirmatory analysis, one approach is to split your data into three pieces before you begin the analysis. Sebastian Raschka, [Model Evaluation](https://sebastianraschka.com/blog/2018/model-evaluation-selection-part4.html)> Since “a picture is worth a thousand words,” I want to conclude with a figure (shown below) that summarizes my personal recommendations ...Usually, we want to do **"Model selection (hyperparameter optimization) _and_ performance estimation."** (The green box in the diagram.)Therefore, we usually do **"3-way holdout method (train/validation/test split)"** or **"cross-validation with independent test set."** What's the difference between Training, Validation, and Testing sets? Brandon Rohrer, [Training, Validation, and Testing Data Sets](https://end-to-end-machine-learning.teachable.com/blog/146320/training-validation-testing-data-sets)> The validation set is for adjusting a model's hyperparameters. The testing data set is the ultimate judge of model performance.>> Testing data is what you hold out until very last. You only run your model on it once. You don’t make any changes or adjustments to your model after that. ... Follow Along> You will want to create your own training and validation sets (by splitting the Kaggle “training” data).Do this, using the [sklearn.model_selection.train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) function:
###Code
from sklearn.model_selection import train_test_split
small_train, small_val = train_test_split(train, random_state=42)
small_train.shape, small_val.shape
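# A minimal sketch (not part of the original lesson) of the full 3-way holdout described
# above: split off a validation set and a locked-away test set from the training data.
# The 80/10/10 proportions and the *_3way names are illustrative assumptions.
train_3way, temp = train_test_split(train, train_size=0.80, random_state=42)
val_3way, test_3way = train_test_split(temp, train_size=0.50, random_state=42)
train_3way.shape, val_3way.shape, test_3way.shape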
###Output
_____no_output_____
###Markdown
Challenge For your assignment, you'll begin to participate in a private Kaggle challenge, just for your cohort! You will be provided with data split into 2 sets: training and test. You will create your own training and validation sets, by splitting the Kaggle "training" data, so you'll end up with 3 sets total. Begin with baselines for classification Overview We'll begin with the **majority class baseline.**[Will Koehrsen](https://twitter.com/koehrsen_will/status/1088863527778111488)> A baseline for classification can be the most common class in the training dataset.[*Data Science for Business*](https://books.google.com/books?id=4ZctAAAAQBAJ&pg=PT276), Chapter 7.3: Evaluation, Baseline Performance, and Implications for Investments in Data> For classification tasks, one good baseline is the _majority classifier,_ a naive classifier that always chooses the majority class of the training dataset (see Note: Base rate in Holdout Data and Fitting Graphs). This may seem like advice so obvious it can be passed over quickly, but it is worth spending an extra moment here. There are many cases where smart, analytical people have been tripped up in skipping over this basic comparison. For example, an analyst may see a classification accuracy of 94% from her classifier and conclude that it is doing fairly well—when in fact only 6% of the instances are positive. So, the simple majority prediction classifier also would have an accuracy of 94%. Follow Along Determine majority class
###Code
small_train.describe()
target = 'Survived'
y_train = small_train[target]
y_train.value_counts(normalize=True)
###Output
_____no_output_____
###Markdown
What if we guessed the majority class for every prediction?
###Code
y_train.mode()[0]
majority_class = y_train.mode()[0]
y_pred = [majority_class] * len(y_train)
# Error rate (fraction of wrong guesses) of the majority-class baseline
sum(abs(y_pred - y_train))/len(y_train)
###Output
_____no_output_____
###Markdown
Use a classification metric: accuracy[Classification metrics are different from regression metrics!](https://scikit-learn.org/stable/modules/model_evaluation.html)- Don't use _regression_ metrics to evaluate _classification_ tasks.- Don't use _classification_ metrics to evaluate _regression_ tasks.[Accuracy](https://scikit-learn.org/stable/modules/model_evaluation.htmlaccuracy-score) is a common metric for classification. Accuracy is the ["proportion of correct classifications"](https://en.wikipedia.org/wiki/Confusion_matrix): the number of correct predictions divided by the total number of predictions. What is the baseline accuracy if we guessed the majority class for every prediction?
###Code
from sklearn.metrics import accuracy_score
accuracy_score(y_train, y_pred)
small_val.describe()
y_val = small_val[target]
y_pred = [majority_class] * len(y_val)
accuracy_score(y_val, y_pred)
###Output
_____no_output_____
###Markdown
Challenge In your Kaggle challenge, you'll begin with the majority class baseline. How quickly can you beat this baseline? Express and explain the intuition and interpretation of Logistic Regression OverviewTo help us get an intuition for *Logistic* Regression, let's start by trying *Linear* Regression instead, and see what happens... Follow Along Linear Regression?
###Code
small_train.describe()
# 1. Import estimator class
from sklearn.linear_model import LinearRegression
# 2. Instantiate this class
linear_reg = LinearRegression()
# 3. Arrange X feature matrices (already did y target vectors)
features = ['Pclass', 'Age', 'Fare']
X_train = small_train[features]
X_val = small_val[features]
# Impute missing values
from sklearn.impute import SimpleImputer
imputer = SimpleImputer()
X_train_imputed = imputer.fit_transform(X_train)
X_val_imputed = imputer.transform(X_val)
# 4. Fit the model
linear_reg.fit(X_train_imputed, y_train)
# 5. Apply the model to new data.
# The predictions look like this ...
linear_reg.predict(X_val_imputed)
# Get coefficients
pd.Series(linear_reg.coef_, features)
small_train.describe()
test_case = [[1, 5, 500]] # 1st class, 5-year old, Rich
linear_reg.predict(test_case)
# This kid is REALLLLLY gonna survive
###Output
_____no_output_____
###Markdown
Logistic Regression!
###Code
from sklearn.linear_model import LogisticRegression
### SCORE
log_reg = LogisticRegression(solver='lbfgs')
log_reg.fit(X_train_imputed, y_train)
print('Validation Accuracy', log_reg.score(X_val_imputed, y_val))
# The predictions look like this
log_reg.predict(X_val_imputed)
log_reg.predict_proba(X_val_imputed)
log_reg.predict(test_case)
test_case_2 = [[1, 50, 1500]]
log_reg.predict_proba(test_case_2)
log_reg.predict_proba(test_case)
# What's the math?
# Logistic coefficients
log_reg.coef_
# Linear coefficients
linear_reg.coef_
log_reg.intercept_
# The logistic sigmoid "squishing" function, implemented to accept numpy arrays
import numpy as np
def sigmoid(x):
return 1 / (1 + np.e**(-x))
# This is the "glue"/difference between linear and logistic regression
sigmoid(log_reg.intercept_ + np.dot(log_reg.coef_, np.transpose(test_case)))
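# Quick check (sketch, assuming the objects above are still in scope): the manual
# sigmoid result should match the class-1 probability from predict_proba.
log_reg.predict_proba(test_case)[:, 1]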
###Output
_____no_output_____
###Markdown
So, clearly a more appropriate model in this situation! For more on the math, [see this Wikipedia example](https://en.wikipedia.org/wiki/Logistic_regressionProbability_of_passing_an_exam_versus_hours_of_study). Use sklearn.linear_model.LogisticRegression to fit and interpret Logistic Regression models OverviewNow that we have more intuition and interpretation of Logistic Regression, let's use it within a realistic, complete scikit-learn workflow, with more features and transformations. Follow AlongSelect these features: `['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']`(Why shouldn't we include the `Name` or `Ticket` features? What would happen here?) Fit this sequence of transformers & estimator:- [category_encoders.one_hot.OneHotEncoder](https://contrib.scikit-learn.org/categorical-encoding/onehot.html)- [sklearn.impute.SimpleImputer](https://scikit-learn.org/stable/modules/generated/sklearn.impute.SimpleImputer.html)- [sklearn.preprocessing.StandardScaler](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html)- [sklearn.linear_model.LogisticRegressionCV](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegressionCV.html)Get validation accuracy.
###Code
import category_encoders as ce
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegressionCV
from sklearn.preprocessing import StandardScaler
target = 'Survived'
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']
X_train = small_train[features]
y_train = small_train[target]
X_val = small_val[features]
y_val = small_val[target]
encoder = ce.OneHotEncoder(use_cat_names=True)
X_train_encoded = encoder.fit_transform(X_train)
X_val_encoded = encoder.transform(X_val)
imputer = SimpleImputer()
X_train_imputed = imputer.fit_transform(X_train_encoded)
X_val_imputed = imputer.transform(X_val_encoded)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_imputed)
X_val_scaled = scaler.transform(X_val_imputed)
model = LogisticRegressionCV(cv=5, n_jobs=-1, random_state=42)
model.fit(X_train_scaled, y_train)
model.score(X_val_scaled, y_val)
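# The same workflow expressed as a single sklearn Pipeline (a sketch, not part of the
# lesson): chaining the encoder, imputer, scaler, and model keeps the fit/transform
# calls consistent and avoids accidentally refitting transformers on validation data.
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(
    ce.OneHotEncoder(use_cat_names=True),
    SimpleImputer(),
    StandardScaler(),
    LogisticRegressionCV(cv=5, n_jobs=-1, random_state=42)
)
pipeline.fit(X_train, y_train)
pipeline.score(X_val, y_val)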
###Output
_____no_output_____
###Markdown
Plot coefficients:
###Code
%matplotlib inline
coefficients = pd.Series(model.coef_[0], X_train_encoded.columns)
coefficients.sort_values().plot.barh()
X_train['Embarked'].value_counts()
###Output
_____no_output_____
###Markdown
Generate [Kaggle](https://www.kaggle.com/c/titanic) submission:
###Code
X_test = test[features]
X_test_encoded = encoder.transform(X_test)
X_test_imputed = imputer.transform(X_test_encoded)
X_test_scaled = scaler.transform(X_test_imputed)
y_pred = model.predict(X_test_scaled)
print(y_pred)
submission = test[['PassengerId']].copy()
submission['Survived'] = y_pred
submission.describe()
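# To produce the file Kaggle expects (sketch; the filename is an arbitrary choice):
submission.to_csv('submission.csv', index=False)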
###Output
_____no_output_____ |
soluciones/jp.mallarino50/tarea3y4/tarea3y4.ipynb | ###Markdown
Task 3: Find the regression. You receive data $x$ and $y$ as shown below. You must answer four questions based on these data. Assume you have a model such that $y=f(x)$ but you do not know $f$.
###Code
# Imports used throughout this notebook
import numpy as np, pandas as pd, scipy as sp
import scipy.stats, scipy.optimize, scipy.integrate, scipy.special
import matplotlib.pyplot as plt, seaborn as sns
df = pd.read_pickle('ex1.gz')
sns.scatterplot(x='x',y='y',data=df)
plt.show()
df
###Output
_____no_output_____
###Markdown
(A) Slope and intercept. Determine the slope of the data on the interval $[0,1.5]$ and the value of the intercept with the $y$ axis, that is, $f(0)=?$. What is the value of $r^2$?
###Code
df_filtrado = df.loc[df['x']<=1.5, :].copy()  # .copy() avoids a SettingWithCopyWarning on the insert() below
# insertamos una columna de unos para calcular el intercepto
df_filtrado.insert(0, 'x0', 1)
# Método SciPy
# ver -> https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.linregress.html
sol_3a = sp.stats.linregress(x=df_filtrado['x'], y=df_filtrado['y'])
print(f'Pendiente: {sol_3a.slope:.8f}')
print(f'Intercepto: {sol_3a.intercept:.8f}')
print(f'r^2: {sol_3a.rvalue**2:.6f}')
# Método Matricial: Sistemas Lineales de la clase
# => Vamos a crear una funcion que hace la regresion -> nos simplifica la vida
def regresion_lineal(data):
assert isinstance(data, pd.DataFrame), 'FEATURE MISSING: `data` must be a Pandas Dataframe'
_data = data.copy()
_cols = list(map(lambda x: ''.join(str(x).split()).lower(), data.columns))
assert 'y' in _cols, 'ERROR: \'y\' not in the columns'
_data.columns = _cols
# creamos las matrices X y Y: X*beta = Y
_Y = np.matrix(_data.loc[:, ['y']].to_numpy(dtype=np.float64))
_cols.pop(_cols.index('y'))
_X = np.matrix(_data.loc[:, _cols].to_numpy(dtype=np.float64))
# calculamos beta estimado
_beta = np.linalg.inv(_X.T*_X)*_X.T*_Y
# calculamos las predicciones estimadas y "reescribimos" observaciones
_Y_pred = np.array(_X*_beta).flatten()
_Y_obs = np.array(_Y).flatten()
_Y_barra = _Y_obs.mean()
_r2 = 1-(np.linalg.norm(_Y_pred - _Y_obs)**2)/(np.linalg.norm(_Y_obs - _Y_barra)**2)
# organizamos el diccionario que vamos a retornar
_X = np.array(_X)
_X = _X.flatten() if 1 in _X.shape else _X
_beta = np.array(_beta).flatten()
_resultado = dict(zip(_cols, _beta), **{
'y_pred': _Y_pred, 'r2': _r2,
'beta': _beta, 'vars': _cols
})
return _resultado
sol_3a_alt = regresion_lineal(df_filtrado)
print(f'Pendiente: {sol_3a_alt["x"]:.8f}')
print(f'Intercepto: {sol_3a_alt["x0"]:.8f}')
print(f'r^2: {sol_3a_alt["r2"]:.6f}')
sns.scatterplot(x='x', y='y', data=df_filtrado)
plt.plot(df_filtrado['x'], sol_3a_alt['y_pred'], 'r--')
plt.show()
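# Quick cross-check (a sketch, not part of the original solution): numpy.polyfit on the
# same filtered data should reproduce the slope and intercept found above.
slope_np, intercept_np = np.polyfit(df_filtrado['x'], df_filtrado['y'], deg=1)
print(f'np.polyfit -> slope: {slope_np:.8f}, intercept: {intercept_np:.8f}')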
###Output
Pendiente: 0.81638696
Intercepto: 0.18270691
r^2: 0.931642
###Markdown
(B) Polynomial regression. Suppose you want to perform the following polynomial regression,$$y=\beta_1+\beta_2x+\beta_3x^2+\beta_4x^3+\beta_5x^4+\beta_6x^5.$$Set up the cost function that lets you compute the coefficients and calculate $\beta_1$, $\beta_2$, $\beta_3$, $\beta_4$, $\beta_5$, and $\beta_6$. What is the $r^2$? Compute $f(0)$ and compare with the previous results.
###Code
# => Vamos a crear una funcion que prepara los datos polinomiales -> nos simplifica la vida
def preparar_datos(data, /, poly=1):
assert isinstance(data, pd.DataFrame), 'FEATURE MISSING: `data` must be a Pandas Dataframe'
_data = data.copy()
_cols = list(map(lambda x: ''.join(str(x).split()).lower(), data.columns))
assert 'y' in _cols, 'ERROR: \'y\' not in the columns'
_data.columns = _cols
_cols.pop(_cols.index('y'))
assert isinstance(poly, int) and poly>0
# 1. funciones auxiliares que me dan el nombre y los valores de la columna
# nombre_columna = lambda _cs, _n: '*'.join(((_cs+' ')*_n).split())
nombre_columna = lambda _cs, _n: str(_cs)+'^'+str(_n)
datos_columna = lambda _dc, _n: np.power(_dc, _n)
# 2. Agregar los terminos polinomiales columna a columna (no hace las combinaciones)
# NOTA: si queremos las combinaciones, tenemos que usar `itertools`
# https://docs.python.org/3/library/itertools.html#itertools.combinations_with_replacement
_data.insert(0, '_intercepto', 1)
_new_data = _data.loc[:, ['_intercepto', 'y']]
for _c in reversed(_cols):
for _p in range(poly, 0, -1):
_new_data.insert(
1,
nombre_columna(_c, _p),
datos_columna(_data[_c].values, _p)
)
return _new_data
# => Vamos a crear una funcion de costo que me toma los datos en formato DataFrame y una funcion
# NOTA: esa funcion puede ser por ejemplo la sigmoidal
def FuncCosto(beta, data, func):
Y = data['y'].values
deltaY = func(x=data, params=beta) - Y
# vamos a usar el estimador de distancia cuadratica media
return np.dot(deltaY, deltaY)/len(deltaY)
# => Escribo las funciones
def func_linear(x, params):
X = x.loc[:, x.columns != 'y'].values
return np.dot(X, params)
##### dejo como ejemplo la funcion sigmoidal, pero vamos a usar la lineal
def func_sigmoidal(x, params):
X = x.loc[:, x.columns != 'y'].values
return 1/(1+np.exp(-np.dot(X, params)))
new_df = preparar_datos(df, poly=5)
new_df
sol_3b = sp.optimize.minimize(
fun=FuncCosto,
x0=np.zeros(new_df.shape[1]-1),
args = (new_df, func_linear),
tol=1e-10
)
sol_3b
# graficamos:
X_graph = pd.DataFrame({
'x': np.linspace(df['x'].min(), df['x'].max(), 1000),
'y': 0
})
X_graph['y'] = func_linear(
x=preparar_datos(X_graph, poly=5),
params=sol_3b['x']
)
sns.scatterplot(x='x', y='y', data=df)
plt.plot(X_graph['x'], X_graph['y'], 'r--')
plt.show()
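# The statement also asks for r^2 and f(0); a sketch using the objects defined above.
# f(0) equals the coefficient of the column of ones ('_intercepto', the first parameter).
y_pred_3b = func_linear(x=new_df, params=sol_3b['x'])
y_obs = new_df['y'].values
r2_3b = 1 - np.sum((y_pred_3b - y_obs)**2)/np.sum((y_obs - y_obs.mean())**2)
print(f'r^2: {r2_3b:.6f}')
print(f'f(0) = {sol_3b["x"][0]:.6f}')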
###Output
_____no_output_____
###Markdown
(C) Exact polynomial regression. It turns out that a polynomial regression can be done exactly. How? Suppose you consider that your problem, instead of having $1$ variable ($x$), has $n+1$, where $n$ is the order of the polynomial to fit. That is, your new variables will be $\{x_0,\,x_1,\,x_2,\,x_3,\dots,\,x_n\}$, defining $x_j=x^j$. Thus, following the same procedure for multidimensional linear regression that we carried out for the real-estate data exercise, you can find the values of the coefficients $\beta_1$ through $\beta_6$. Find these values and compare with the results of section **(B)**. Compute $f(0)$ and compare with the previous results.> If you are wondering whether this is possible, the answer is yes. In fact, this can be extended to any set of functions, such that $x_j=f_j(x)$, that forms a "linearly independent" set (I am getting ahead of *Fourier*!). For those who want to explore some mathematical curiosities: when $n+1$ equals the number of points or values of $x$ (all of them different), the matrix is always invertible and turns out to be the inverse of a Vandermonde matrix.
###Code
# Siendo que hicimos muchas cosas anteriormente, todo va a ser mucho mas facil ahora...
# 1. datos preparados
new_df = preparar_datos(df, poly=5)
# 2. hacemos la "regresion lineal" exacta
sol_3c = regresion_lineal(new_df)
# elimino, porque sobran, `graf X` y `graf Y`
sol_3c.pop('graf X', None)
sol_3c.pop('graf Y', None)
print(f'Variables: {sol_3c["vars"]}')
print(f' Pesos: {sol_3c["beta"]}')
print(f'r^2: {sol_3c["r2"]:.6f}')
# ahora graficamos...
# graficamos:
X_graph = pd.DataFrame({
'x': np.linspace(df['x'].min(), df['x'].max(), 1000),
'y': 0
})
X_graph['y'] = func_linear(
x=preparar_datos(X_graph, poly=5),
params=sol_3c['beta']
)
sns.scatterplot(x='x', y='y', data=df)
plt.plot(X_graph['x'], X_graph['y'], 'r--')
plt.show()
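# f(0) for the exact fit (sketch): it is simply the coefficient of the '_intercepto'
# column, directly comparable with the value obtained in part (B).
print(f'f(0) = {sol_3c["_intercepto"]:.6f}')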
###Output
_____no_output_____
###Markdown
(D) Regression to a theoretical model. Suppose your theoretical model is the following:$$y=\frac{a}{\left[(x-b)^2+c\right]^\gamma}.$$Find $a$, $b$, $c$ and $\gamma$. Compute $f(0)$ and compare with the previous results.
###Code
# Siendo que hicimos muchas cosas anteriormente, todo va a ser mucho mas facil ahora...
# lo único que tengo que hacer es escribir una funcion especial y hacer minimizacion/optimizacion
# => Escribo las funciones
def func_especial(x, params):
a, b, c, gamma = params
X = x['x'].values
return a/np.power((X-b)**2+c, gamma)
# => Hago la optimizacion
sol_3d = sp.optimize.minimize(
fun=FuncCosto,
x0=np.array([0,0,0,1]), ## a, b, c, gamma
args = (df, func_especial),
tol=1e-10
)
sol_3d
# ahora graficamos...
# graficamos:
X_graph = pd.DataFrame({'x': np.linspace(df['x'].min(), df['x'].max(), 1000)})
X_graph['y'] = func_especial(x=X_graph, params=sol_3d['x'])
sns.scatterplot(x='x', y='y', data=df)
plt.plot(X_graph['x'], X_graph['y'], 'r--')
plt.show()
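# f(0) for the theoretical model (sketch): f(0) = a/(b^2 + c)^gamma with the fitted
# parameters, to compare with the f(0) estimates from the previous sections.
a_fit, b_fit, c_fit, gamma_fit = sol_3d.x
print(f'f(0) = {a_fit/np.power(b_fit**2 + c_fit, gamma_fit):.6f}')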
###Output
_____no_output_____
###Markdown
Task 4. Based on the methods seen in class, solve the following two questions. (A) Integrals* $\int_{0}^{1}x^{-1/2}\,\text{d}x$* $\int_{0}^{\infty}e^{-x}\ln{x}\,\text{d}x$* $\int_{0}^{\infty}\frac{\sin{x}}{x}\,\text{d}x$
###Code
### pueden usar cuadraturas si quieren!
# -> https://docs.scipy.org/doc/scipy/reference/generated/scipy.special.roots_chebyt.html
# para la primera recordemos que debemos tomar un epsilon mayor a cero muy pequeño
epsilon = 1e-6
# vamos a tomar el intervalo entre 0 y 1 no uniforme tal que distancia entre uno y otro disminuya a medida q nos acercamos a 0
alpha = 5/2 # es una potencia muy conveniente que me define como decrece dx vs la altura y vs el area. Mantiene el error bajito
N = int(1/(epsilon**(1/alpha))) + 1 # numero de puntos queda determinado por epsilon
print(f'N={N}')
x = np.linspace(0, 1, N)**alpha
y = np.power(x, -1/2)
y[0] = np.power(x[1]/2, -1/2)
print('Integral exacta: 2')
print('Integral:',sp.integrate.simpson(y, x))
# para la segunda... usamos los polinomios generalizados de laguerre
N = 10
z, w = sp.special.roots_genlaguerre(n=N, alpha=0)
def f(x):
return np.log(x)
print('Valor exacto: -Euler Mascheroni=-0.577215664901...')
print('Integral:', np.dot(f(z), w))
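# Cross-check with adaptive quadrature (a sketch, not part of the original solution):
# scipy.integrate.quad handles the x**(-1/2) endpoint singularity directly, and the
# second integral is split at x=1 so the log singularity sits at an interval boundary.
print('quad check 1:', sp.integrate.quad(lambda x: x**(-0.5), 0, 1)[0])
f_ln = lambda x: np.exp(-x)*np.log(x)
print('quad check 2:', sp.integrate.quad(f_ln, 0, 1)[0] + sp.integrate.quad(f_ln, 1, np.inf)[0])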
###Output
Valor exacto: -Euler Mascheroni=-0.577215664901...
Integral: -0.5147180612524798
###Markdown
-> However, one can improve this integral with an integration by parts$$\begin{eqnarray*}\int_{0}^{\infty}e^{-x}\ln{x}\,\text{d}x = \overbrace{\left(x\ln{x}-x\right)e^{-x}\Big\vert_{0}^{\infty}}^{\text{cancels}}+\int_{0}^{\infty}\left(x\ln{x}-x\right)e^{-x}\,\text{d}x = \int_{0}^{\infty}\left(x\ln{x}-x\right)e^{-x}\,\text{d}x\end{eqnarray*}$$
###Code
def f1(x):
return x*(np.log(x)-1)
print('Integral:', np.dot(f1(z), w))
###Output
Integral: -0.5798107635615206
###Markdown
And if I do yet another integration by parts...$$\int_{0}^{\infty}e^{-x}\ln{x}\,\text{d}x = \overbrace{\left(\frac{x^2}{2}\ln{x}-\frac{3}{4}x^2\right)e^{-x}\Big\vert_{0}^{\infty}}^{\text{cancels}}+\int_{0}^{\infty}\left(\frac{x^2}{2}\ln{x}-\frac{3}{4}x^2\right)e^{-x}\,\text{d}x = \int_{0}^{\infty}\left(\frac{x^2}{2}\ln{x}-\frac{3}{4}x^2\right)e^{-x}\,\text{d}x$$
###Code
def f2(x):
return 0.5*(x**2)*(np.log(x)-1.5)
print('Integral:', np.dot(f2(z), w))
# por ultimo, la de sinc(x) o sin(x)/x, tomemos los periodos de 2pi, y como es una funcion suave se puede usar simpson o trapecio
# pero... hay que mirar lo asintotico... aqui es mas importante ver lo asintotico
import math
x_periodo = np.linspace(0,2*np.pi,20)
x0 = lambda j:2*np.pi*j # donde j es el trozo de periodicidad 2pi que estamos analizando
def sinc(x):
_sinc = lambda y: math.sin(y)/y if y!=0 else 1
return np.array(list(map(_sinc, x)))
# Podemos usar simpson para integrar en cada periodo la contribución a la integral
Nj = 10 # vamos a integrar 10 periodos
sum_integral = 0
for j in range(Nj):
x = x0(j) + x_periodo
sum_integral += sp.integrate.simpson(sinc(x), x)
print('Valor exacto: pi/2 = 1.570796326794...')
print('Integral:',sum_integral)
# si queremos ver como se aproxima, veamos, tomemos Nj como si fuera una secuencia de fibonacci (por que? se los dejo a uds)
def fib(n):
_f = []
for _n in range(n):
if _n <= 1:
_f.append(1)
else:
_f.append(_f[-2]+_f[-1])
return _f.copy()
Nj = fib(26)[1:]
partial_sum = []
for nj in Nj: #-> se puede optimizar y se los dejo a uds!
sum_integral = 0
print('progreso:', nj)
for j in range(nj):
x = x0(j) + x_periodo
sum_integral += sp.integrate.simpson(sinc(x), x)
partial_sum.append(sum_integral)
plt.hlines(np.pi/2, min(Nj), max(Nj), 'blue')
ax = plt.gca()
ax.scatter(Nj ,partial_sum , c='red', alpha=0.7, edgecolors='none')
ax.set_xscale('log')
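# Independent check (sketch): scipy.special.sici returns the sine integral Si(x),
# which converges to pi/2, so a large argument estimates the same limit.
print('Si(1e5) =', sp.special.sici(1e5)[0], 'vs pi/2 =', np.pi/2)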
###Output
_____no_output_____
###Markdown
(B) Fourier. Compute the fast Fourier transform of the function from **Task 3 (D)** on the interval $[0,4]$ (maximum $k$ of $2\pi n/L$ with $n=25$). Fit the Fourier transform to the data of **Task 3** using the exact regression method of **Task 3 (C)** and compare with the previous result. For both exercises do an interpolation and plot to compare.
###Code
# literalmente tomen el ejemplo de scipy: https://docs.scipy.org/doc/scipy/reference/tutorial/fft.html#d-discrete-fourier-transforms
# o... usen las funciones que hicimos en clase
from scipy.fft import fft, rfft, fftfreq
N = 200
x = np.linspace(0, 4, N, endpoint=False)
dx = 4/N
y = func_especial(x=pd.DataFrame({'x': x}), params=sol_3d.x)
plt.plot(x, y, 'r--')
# aplicamos la de la clase
def FFT(x, y, a, L, /, Nf=None):
detNf = lambda n: (n+1)//2 if n%2==1 else n//2
Nf = detNf(len(x)) if Nf is None else Nf
assert all(x>=a) and all(x<a+L), f'`x` fuera del interfalo [{a}, {a+L})'
k = lambda j: 2*j*np.pi/L
def a_j(j):
new_y = y*np.cos(k(j)*x)/L
if j > 0:
new_y = new_y * 2
return sp.integrate.simpson(new_y, x)
def b_j(j):
new_y = y*np.sin(k(j)*x)/L
if j > 0:
new_y = new_y * 2
return sp.integrate.simpson(new_y, x)
Aj = np.array([a_j(j) for j in range(Nf)])
Bj = np.array([b_j(j) for j in range(Nf)])
Cj = np.array([(Aj[j]-Bj[j]*1J)*(0.5 if j>0 else 1) for j in range(Nf)])
return {
'Nf': Nf, 'a': a, 'L': L,
'Aj': Aj, 'Bj': Bj, 'Cj': Cj
}
def invFFT(fft, /, x=None, frec=0, N=None):
# fft is the output of FFT
Nf=fft['Nf']
a=fft['a']
L=fft['L']
Aj=fft['Aj']
Bj=fft['Bj']
if N is not None:
Nf = min(N,Nf)
x_tilde = np.linspace(
a-frec*L,
a+(frec+1)*L,
1000*(2*frec+1),
endpoint=False
) if x is None else x.copy()
k = lambda j: 2*j*np.pi/L
y_tilde = np.sum([
Aj[j] * np.cos(k(j)*x_tilde) + Bj[j] * np.sin(k(j)*x_tilde) for j in range(Nf)
], axis=0)
return x_tilde, y_tilde
sol_4b_1 = FFT(x, y, 0, 4)
graf_x, graf_y = invFFT(sol_4b_1, frec=0, N=10)
plt.plot(graf_x, graf_y, 'b-')
plt.plot(x, y, 'r--')
# usando SciPy -> para comparar!
yf = rfft(y)/N
yf[:10]
# Fijense que a pesar de lo parecidos, hay diferencias... a que creen uds que se debe?
sol_4b_1['Cj'][:10]
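# A quick look at the gap (sketch): the coefficients agree to a few decimals; the
# remaining difference is likely the quadrature (Simpson in FFT() vs. the plain
# discrete sum in rfft) and the handling of the last sample/endpoint.
np.max(np.abs(sol_4b_1['Cj'][:10] - yf[:10]))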
###Output
_____no_output_____
###Markdown
Finally, for the "regression" we have to make a slight adjustment to a single function. Let's repeat 3(C).
###Code
# => Vamos a crear una funcion que prepara los datos polinomiales -> nos simplifica la vida
def preparar_datos_fourier(data, a, L, /, Nf=None):
assert isinstance(data, pd.DataFrame), 'FEATURE MISSING: `data` must be a Pandas Dataframe'
_data = data.copy()
_cols = list(map(lambda x: ''.join(str(x).split()).lower(), data.columns))
_data.columns = _cols
assert {'x', 'y'}.issubset(set(_cols)), 'ERROR: \'x\' or \'y\' not in the columns'
assert L > 0, 'ERROR: `L` must be a nonzero distance'
# 1. la funcion que me calcula el numero de onda
k = lambda j: 2*j*np.pi/L
# 2. funciones auxiliares que dan el valor de la columna
def a_j(j, x):
return np.cos(k(j)*x)
def b_j(j, x):
return np.sin(k(j)*x)
# 3. Agregar los terminos polinomiales columna a columna (no hace las combinaciones)
# NOTA: si queremos las combinaciones, tenemos que usar `itertools`
# https://docs.python.org/3/library/itertools.html#itertools.combinations_with_replacement
_data.insert(0, 'A_0', 1)
_x = _data['x'].values
_new_data = _data.loc[:, ['A_0', 'y']]
for _p in range(Nf, 0, -1):
_new_data.insert(1, 'B_'+str(_p), b_j(_p, _x))
_new_data.insert(1, 'A_'+str(_p), a_j(_p, _x))
return _new_data
# Siendo que hicimos muchas cosas anteriormente, todo va a ser mucho mas facil ahora...
# 1. datos preparados
new_df = preparar_datos_fourier(df, 0, 4, Nf=6)
# 2. hacemos la "regresion lineal" exacta
sol_4b_2 = regresion_lineal(new_df)
# elimino, porque sobran, `graf X` y `graf Y`
sol_4b_2.pop('graf X', None)
sol_4b_2.pop('graf Y', None)
from pprint import pformat
Aj_seleccion = list(map(lambda s: s.startswith('a_'), sol_4b_2['vars']))
Aj = sol_4b_2["beta"][Aj_seleccion]
Bj_seleccion = list(map(lambda s: s.startswith('b_'), sol_4b_2['vars']))
Bj = sol_4b_2["beta"][Bj_seleccion]
print(f'Aj (integracion): {sol_4b_1["Aj"][:10]}')
print(f'Aj (regresion): {Aj}')
print(f'Bj (integracion): {sol_4b_1["Bj"][:10]}')
print(f'Bj (regresion): {Bj}')
print(f'r^2: {sol_4b_2["r2"]:.6f}')
# ahora graficamos...
# graficamos:
X_graph = pd.DataFrame({
'x': np.linspace(df['x'].min(), df['x'].max(), 1000),
'y': 0
})
X_graph['y'] = func_linear(
x=preparar_datos_fourier(X_graph, 0, 4, Nf=6),
params=sol_4b_2['beta']
)
sns.scatterplot(x='x', y='y', data=df)
plt.plot(X_graph['x'], X_graph['y'], 'r--')
plt.show()
###Output
_____no_output_____ |
lectures/01_intro/code/learn-pandas/lessons/Cookbook - Merge.ipynb | ###Markdown
Merge I have two dataframes that have dates as their index. The problem is that one of the dataframes has a timestamp and this is preventing me from adding the dataframes together. How can I match up the time stamps?
###Code
import pandas as pd

df1 = pd.DataFrame({'col1':[pd.Timestamp('20130102000030'),
pd.Timestamp('2013-01-03 00:00:30'),
pd.Timestamp('1/4/2013 000030')],
'col2':[1,10,18]
})
df1
df1 = df1.set_index('col1')
df1
d = {'col2':[22,10,113]}
i = [pd.Timestamp('20130102'),
pd.Timestamp('2013-01-03'),
pd.Timestamp('1/4/2013')]
df2 = pd.DataFrame(data=d, index = i)
df2.index.name = 'col1'
df2
# If we try to add the data frames together, we do not get the results we want.
df2+df1
# Make the index of df2 the same as the index of df1
# Fill the missing values with previous known value
#
#2013-01-02 00:00:00 => 22
#2013-01-02 00:00:30 => 22
#2013-01-03 00:00:00 => 10
#2013-01-03 00:00:00 => 10
#2013-01-04 00:00:00 => 113
#2013-01-04 00:00:00 => 113
df2.reindex(df1.index, method='pad')
# Now we can add them
df2 = df2.reindex(df1.index, method='pad')
df1+df2
###Output
_____no_output_____
###Markdown
How do I add two dataframes together by row?
###Code
df1 = pd.DataFrame([1,2,3])
df1
df2 = pd.DataFrame([4,5,6])
df2
pd.concat([df1,df2])
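# If a fresh 0..n-1 index is wanted instead of the repeated labels, the usual variant
# (a sketch) is ignore_index=True:
pd.concat([df1, df2], ignore_index=True)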
###Output
_____no_output_____
###Markdown
How do I join two data frames by index?
###Code
d = {'col1':[22,10,113]}
i = [pd.Timestamp('1/1/2013'),
pd.Timestamp('1/2/2013'),
pd.Timestamp('1/3/2013')]
df1 = pd.DataFrame(data=d, index = i)
df1
d = {'col2':[5,5]}
i = [pd.Timestamp('1/1/2013'),
pd.Timestamp('1/3/2013')]
df2 = pd.DataFrame(data=d, index = i)
df2
df1.merge(df2, left_index=True, right_index=True, how='left')
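# An equivalent spelling (sketch): DataFrame.join aligns on the index by default,
# so a left join reproduces the merge above.
df1.join(df2, how='left')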
###Output
_____no_output_____ |
ComplementaryScripts/5. Close Reaction Balances and Implement N-Assimilation.ipynb | ###Markdown
Validation and correction of the model's growth rate This will be achieved by using uptake/secretion rates from literature and experiments Import
###Code
import cameo
from cobra import Model, Reaction, Metabolite
from cobra.io import read_sbml_model
from cobra.io import save_json_model
from cameo.flux_analysis.simulation import pfba
import cobra.test
import os
from Functions_Modules.curation_tools import *
relative_directory = os.getcwd()
filename = relative_directory + '/Reconstructions/MethylococcusModel7.xml'
model = cameo.load_model(filename)
###Output
_____no_output_____
###Markdown
Test for functionality after the import
###Code
model.objective = model.reactions.get_by_id('BIOMASS_REACTION')
solution = show_uptake_excretion(model,model.reactions.get_by_id('BIOMASS_REACTION'))
model.objective.expression
len(model.solver.constraints)
[(k,v) for k,v in solution.fluxes.items() if v >= 0.01]
###Output
_____no_output_____
###Markdown
Explore metabolic flexibility when the methane uptake rate is fixed
###Code
# fva,completely_blocked = find_blocked_reactions(model)
from cameo import flux_variability_analysis
fva_result = flux_variability_analysis(model, reactions=model.reactions)
model.objective.expression
fva_result.data_frame.loc['o2_in']
fva_result.data_frame.loc['nh3_in']
fva_result.data_frame.loc['co2_out']
fva_result.data_frame.loc['so4_in']
fva_result.data_frame.loc['sMMO_c']
fva_result.data_frame.loc['pMMO_im']
solution.fluxes['pMMO_im']
solution.fluxes['sMMO_c']
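# Sketch: the O2/CH4 uptake ratio discussed below, computed directly from the current
# solution (exchange reaction IDs 'o2_in' and 'ch4_in' as used above).
print 'O2/CH4 uptake ratio:', solution.fluxes['o2_in'] / solution.fluxes['ch4_in']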
###Output
_____no_output_____
###Markdown
Show all reactions that require oxygen and their flux variability. (in above's condition)
###Code
df = fva_result.data_frame.loc[[r.id for r in model.reactions if model.metabolites.get_by_id('o2_c') in r.reactants]]
df.loc[(df != 0).any(1)]
###Output
_____no_output_____
###Markdown
For some reason the model predicts an O2/CH4 uptake ratio of about 1.5. In experimental measurements by Leak & Dalton this ratio ranges from 1.41 to 1.6 in various conditions. The metabolic model constructed by de la Torre et al. has this ratio at 1.11 (but that is at a much higher growth rate of 0.269).
###Code
solution = show_uptake_excretion(model,model.reactions.get_by_id('BIOMASS_REACTION'))
cameo.pfba(model).data_frame.loc[[r.id for r in model.exchanges]].query("abs(flux) > 0")
###Output
_____no_output_____
###Markdown
Fix the growth rate to 0.1 and check the minimal oxygen uptake rate
###Code
model.solver = "cplex"
model.reactions.BIOMASS_REACTION.lower_bound = 0.1
model.reactions.ch4_in.lower_bound = 1
model.objective = model.reactions.o2_in
model.objective.direction = 'min'
solution = model.solve()
solution.data_frame
cameo.pfba(model).data_frame.loc[[r.id for r in model.exchanges]].query("abs(flux) > 0")
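# Sketch: with growth fixed at 0.1 and CH4 uptake at 1, report the O2/CH4 ratio implied
# by the pFBA solution above (assumes the 'flux' column shown in the data frame).
pfba_df = cameo.pfba(model).data_frame
print 'O2/CH4 uptake ratio:', abs(pfba_df.loc['o2_in', 'flux'] / pfba_df.loc['ch4_in', 'flux'])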
###Output
_____no_output_____
###Markdown
The model can predict oxygen uptake rates that match the experimental data; however, the O2/CH4 uptake ratio remains unchanged. Identify unbalanced essential reactions:
###Code
model.reactions.BIOMASS_REACTION.lower_bound = 0.1
model.reactions.ch4_in.lower_bound = 1
model.objective = model.reactions.o2_in
model.objective.direction = 'min'
essential_reactions = model.essential_reactions()
unbalanced_list = find_unbalanced_reactions(model)
check_and_fix = [x.id for x in essential_reactions if x.id in unbalanced_list[0]]
for x in check_and_fix:
print x,unbalanced_list[0][x]
###Output
There are 216 unbalanced reactions and 6 metabolites with a faulty syntax
3HAD180_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
3OAS180_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAR180_c {'C': 0, 'H': 0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
ACOATA_c {'C': 0, 'H': 1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 1.0, 'R': 1.0}
KAS14_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAS160_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAS140_c {'C': 12.0, 'H': 22.0, 'charge': 0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
3OAS120_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAS100_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAS80_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAS60_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
MCOATA_c {'C': 0, 'H': 2.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 1.0, 'R': 1.0}
RNDR2_c {'C': 0, 'H': 1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
RNDR1_c {'C': 0, 'H': 2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PRFGS_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
TRDR_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
RNDR3_c {'C': 0, 'H': 2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PSSA_MC_c {'C': -1.0, 'H': -1.0, 'charge': 0, 'O': 7.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
DHDPRy_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': -1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
U23GAAT_c {'C': 0, 'H': -2.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
UAGAAT_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
UDCPDP_c {'C': 0, 'H': 0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PAPPT3_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
APG3PAT_MC_c {'C': -1.0, 'H': -1.0, 'charge': 0, 'O': -3.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
G3PAT_MC_c {'C': 1.0, 'H': -1.0, 'charge': 1.0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PHCYT_MC_c {'C': 1.0, 'H': 0, 'charge': -2.0, 'O': -7.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PSD_MC_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PMETM2_MC_c {'C': 2.0, 'H': 7.0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PMETM_MC_c {'C': 2.0, 'H': 7.0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PGSA_MC_c {'C': -17.0, 'H': -33.0, 'charge': 0, 'O': 7.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PMPS_c {'C': 0, 'H': 1.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
FMNAT_c {'C': -17.0, 'H': -19.0, 'charge': 2.0, 'O': -9.0, 'N': -4.0, 'P': -1.0, 'S': 0, 'R': 0}
RBFK_c {'C': -17.0, 'H': -19.0, 'charge': 2.0, 'O': -9.0, 'N': -4.0, 'P': -1.0, 'S': 0, 'R': 0}
MNXR18583_c {'C': -4.0, 'H': -2.0, 'charge': 0, 'O': -3.0, 'N': -2.0, 'P': 0, 'S': 0, 'R': 0}
PPNCL2_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
GLUTRR_c {'C': 5.0, 'H': 7.0, 'charge': 0, 'O': 3.0, 'N': 1.0, 'P': 0, 'S': 0, 'R': 0}
GLUTRS_c {'C': -5.0, 'H': -7.0, 'charge': 0, 'O': -3.0, 'N': -1.0, 'P': 0, 'S': 0, 'R': 0}
PAPR_c {'C': 0, 'H': 2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
DES_9_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
OCT_GAPFILLING_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
DES_9_2_c {'C': 0, 'H': 0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
FACOAE141_c {'C': 0, 'H': 0, 'charge': -4.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
FA180ACPHi_c {'C': 0, 'H': -1.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
FA160ACPHi_c {'C': 0, 'H': -1.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
FA140ACPHi_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAR150_GAPFILLING_c {'C': 15.0, 'H': 10.0, 'charge': 0, 'O': 15.0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
3OAR170_GAPFILLING_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
FA150ACPHi_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
FA170ACPHi_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
POR_syn_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
CFACPOA2H_Lumped_c {'C': 0, 'H': -2.0, 'charge': -1.0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
FACOAEcyc170_c {'C': -42.0, 'H': -64.0, 'charge': 9.0, 'O': -33.0, 'N': -14.0, 'P': -6.0, 'S': -2.0, 'R': 0}
MECDPDH5_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PFOR_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PCOATA_c {'C': 0, 'H': 1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 1.0, 'R': 1.0}
DM_4hba_c {'C': -7.0, 'H': -8.0, 'charge': 0, 'O': -2.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MC_Average_FattyAcid_c {'C': -0.040999999999999426, 'H': 0.03699999999999959, 'charge': -0.09299999999999997, 'O': 0.0020000000000002308, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MC_AFAA_c {'C': 0, 'H': 1.0, 'charge': 0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PPTGS_MC_c {'C': -40.0, 'H': -62.0, 'charge': 2.0, 'O': -21.0, 'N': -8.0, 'P': 0, 'S': 0, 'R': 0}
HEPK1_GAPFILLING_c {'C': 0, 'H': 0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
HEPK2_GAPFILLING_c {'C': 0, 'H': 0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MOAT3C_GAPFILLING_c {'C': 0, 'H': 0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
EDTXS1_GAPFILLING_c {'C': 12.0, 'H': 22.0, 'charge': 6.0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
EDTXS2_GAPFILLING_c {'C': 0, 'H': -1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': -1.0, 'R': -1.0}
COLIPAabctex_GAPFILLING_c {'C': -176.0, 'H': -303.0, 'charge': 0, 'O': -100.0, 'N': -2.0, 'P': -4.0, 'S': 0, 'R': 0}
###Markdown
Fix faulty metabolites:
###Code
met = model.metabolites.get_by_id('3hoctaACP_c')
met.charge = -1
from cameo import load_model
model2 = load_model("iJO1366")
unbalanced_list2 = find_unbalanced_reactions(model2)
for x in model.metabolites:
if x.id in model2.metabolites:
print x,x.charge,x.formula
if x.formula != model2.metabolites.get_by_id(x.id).formula:
x.formula = str(model2.metabolites.get_by_id(x.id).formula)
print x,x.charge,x.formula,'\n'
#x.charge = model2.metabolites.get_by_id(x.id).charge
essential_reactions = model.essential_reactions()
unbalanced_list = find_unbalanced_reactions(model)
check_and_fix = [x.id for x in essential_reactions if x.id in unbalanced_list[0]]
for x in check_and_fix:
print x,unbalanced_list[0][x]
#{str(k.id):v for (k,v) in model2.reactions.PRAIS.metabolites.iteritems()}
for x in check_and_fix:
if x[:-2] in model2.reactions:
id_dict = {str(k.id):v for (k,v) in model2.reactions.get_by_id(x[:-2]).metabolites.iteritems()}
try:
translated_dict = {model.metabolites.get_by_id(k):v for (k,v) in id_dict.iteritems()}
except:
translated_dict = model.reactions.get_by_id(x).metabolites
model.reactions.get_by_id(x).add_metabolites(translated_dict, combine=False)
###Output
_____no_output_____
###Markdown
Close the balances!
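Throughout the fixes below, a quick way to confirm that a given change actually closed a balance is to re-run `check_mass_balance` on the affected reaction; an empty result means the reaction is mass- and charge-balanced. A minimal sketch (`PFOR_c` is only used as an example here):

```python
rxn = model.reactions.get_by_id('PFOR_c')
print(rxn.check_mass_balance())  # an empty dict means the reaction is balanced
```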
###Code
# def formula_sum(dict_list,classified):
# sum_dict = {}
# formula = str()
# for d in dict_list:
# for key,value in d.iteritems():
# if key not in sum_dict.keys():
# sum_dict[key]=abs(value)
# elif key not in classified:
# sum_dict[key]+=abs(value)
# for e in 'CHONPRS':
# if e in sum_dict.keys():
# formula += e+str(sum_dict[e])
# return sum_dict,formula
def superprint(rxn_id):
rxn = model.reactions.get_by_id(rxn_id)
print rxn.reaction,'\n'
for x in rxn.reactants:
print x.id,x.formula,x.charge
print '<=>'
for x in rxn.products:
print x.id,x.formula,x.charge
essential_reactions = model.essential_reactions()
unbalanced_list = find_unbalanced_reactions(model)
check_and_fix = [x.id for x in essential_reactions if x.id in unbalanced_list[0]]
for x in check_and_fix:
print x,unbalanced_list[0][x]
model.metabolites.get_by_id('3hbutACP_c').formula = 'C15H27O9N2PRS'
model.metabolites.get_by_id('colipa_e_None').formula = 'C176H303N2O100P4'
model.reactions.FACOAEcyc170_c.add_metabolites({model.metabolites.coa_c:1},combine=False)
model.metabolites.ptdcalACP_c_None.charge = -1
model.metabolites.ptdcalACP_c_None.formula = 'C26H49O8N2PRS'
model.reactions.get_by_id('PMETM_MC_c').add_metabolites({model.metabolites.ahcys_c:1,model.metabolites.amet_c:-1},combine=False)
model.reactions.get_by_id('PMETM2_MC_c').add_metabolites({model.metabolites.ahcys_c:1,model.metabolites.amet_c:-1},combine=False)
superprint('DHDPRy_c')
met = model.metabolites.get_by_id('MNXM30985_c')
met.id = '23dhdp_c'
met.formula = 'C7H5NO4'
met.charge = -2
superprint('MNXR61396_c')
rxn = model.reactions.MNXR61396_c
rxn.add_metabolites({model.metabolites.h2o_c:2},combine=False)
rxn.id = 'DHDPS_c'
rxn.name = 'Dihydrodipicolinate synthase'
met = Metabolite('urea_c')
met.id = 'urea_c'
met.formula = 'CH4N2O'
met.charge = 0
model.add_metabolites([met])
# https://pubchem.ncbi.nlm.nih.gov/compound/3844765#section=Top
# This is the same!
met = Metabolite('doxopa_c')
met.id = 'doxopa_c'
met.formula = 'C3H2O4'
met.charge = 0
model.add_metabolites([met])
superprint('MNXR18583_c')
rxn = model.reactions.get_by_id('MNXR18583_c')
rxn.add_metabolites({model.metabolites.urea_c:1,model.metabolites.doxopa_c:1,model.metabolites.h2o_c:-1},combine=False)
rxn.id = 'BLUB_c'
rxn.name = '5,6-dimethylbenzimidazole synthase'
# http://pubs.acs.org/doi/abs/10.1021/ja1106207
# http://bigg.ucsd.edu/models/iYL1228/reactions/BLUB
met = Metabolite('allphn_c')
met.id = 'allphn_c'
met.formula = 'C2H3N2O3'
met.charge = -1
model.add_metabolites([met])
rxn = Reaction('UREASE_GAPFILLING_c')
rxn.id = 'UREASE_GAPFILLING_c'
rxn.name = 'Urea carboxylase'
rxn.add_metabolites({model.metabolites.urea_c:-1
,model.metabolites.hco3_c:-1
,model.metabolites.atp_c:-1
,model.metabolites.adp_c:1
,model.metabolites.pi_c:1
,model.metabolites.h_c:1
,model.metabolites.allphn_c:1},combine=False)
model.add_reaction(rxn)
rxn = Reaction('ALPHNH_c')
rxn.id = 'ALPHNH_c'
rxn.notes.update({'CONFIDENCE SCORE':['2']})
rxn.notes.update({'EC Number':['3.5.1.54']})
rxn.notes.update({'GENE_ASSOCIATION': ['( MCA0477 and MCA0478 )']})
rxn.gene_reaction_rule = '( MCA0477 and MCA0478 )'
rxn.name = 'Allophanate hydrolase'
rxn.add_metabolites({model.metabolites.allphn_c:-1
,model.metabolites.h2o_c:-1
,model.metabolites.co2_c:2
,model.metabolites.h_c:3
,model.metabolites.nh3_c:2},combine=False)
model.add_reaction(rxn)
model.add_demand(model.metabolites.doxopa_c)
met = model.metabolites.get_by_id('cpoa2hcoa_c_None')
met.formula = 'C38H63N7O17P3S'
met.charge = -4
met = model.metabolites.get_by_id('mc_fattyacidcoa_c_None')
met.formula = 'C37H63N7O17P3S'
model.metabolites.pe_MC_c_None.charge = 0
model.metabolites.but2eACP_c.formula='C15H25N2O8PRS'
rxn = model.reactions.get_by_id('PFOR_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
model.metabolites.flxr_c.charge = -2
model.metabolites.malACP_c.charge = -2
model.metabolites.ACP_c.charge = -1
model.metabolites.ppACP_c_None.formula = 'C14H25N2O8PRS'
model.metabolites.ACP_c.formula = 'C11H21N2O7PRS'
model.metabolites.mc_fattyacidcoa_c_None.formula = 'C37H62N7O17P3S'
rxn = model.reactions.POR_syn_c
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
model.metabolites.fdxrd_c.charge = -1
model.metabolites.fdxrd_c.formula = 'Fe2S2X'
model.metabolites.fdxox_c.charge = 0
model.metabolites.fdxox_c.formula = 'Fe2S2X'
rxn = model.reactions.PAPR_c
rxn.add_metabolites({model.metabolites.h_c:2},combine=False)
rxn = model.reactions.ALPHNH_c
rxn.add_metabolites({model.metabolites.h_c:-3},combine=False)
rxn = model.reactions.SULR_c
rxn.add_metabolites({model.metabolites.h2o_c:3,
model.metabolites.h_c:-5,
model.metabolites.h2s_c:1,
model.metabolites.so3_c:-1,
model.metabolites.nadp_c:3,
model.metabolites.nadph_c:-3},combine=False)
rxn = model.reactions.KDOCT_c
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.SHSL2r_c
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
rxn = model.reactions.GALU_c
rxn.add_metabolites({model.metabolites.h_c:-1},combine=False)
rxn = model.reactions.GK1_c
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.NDPK4_c
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
model.metabolites.dttp_c.charge = -4
model.metabolites.utp_c.charge = -4
model.metabolites.ctp_c.charge = -4
model.metabolites.acACP_c.charge = -1
model.metabolites.actACP_c.charge = -1
model.metabolites.get_by_id('3hbutACP_c').charge = -1
model.metabolites.get_by_id('but2eACP_c').charge = -1
model.metabolites.get_by_id('ppACP_c_None').charge = -1
rxn = model.reactions.get_by_id('3OAR150_GAPFILLING_c')
rxn.add_metabolites({model.metabolites.malACP_c:-6,model.metabolites.h_c:-18},combine=False)
model.metabolites.get_by_id('hpdcalACP_c_None').formula = 'C28H55N2O8PRS'
rxn = model.reactions.get_by_id('3OAR170_GAPFILLING_c')
rxn.add_metabolites({model.metabolites.h_c:-5},combine=False)
model.metabolites.get_by_id('hpdcalACP_c_None').charge = -1
model.metabolites.get_by_id('peptido_MC_c_None').formula = 'C40H62N8O21'
model.metabolites.get_by_id('peptido_MC_c_None').charge = -2
model.metabolites.get_by_id('1agpgafa_c_None').formula = 'C19H37O7P1'
model.metabolites.get_by_id('1agpgafa_c_None').charge = -2
model.metabolites.get_by_id('pa_MC_c_None').charge = -2
model.metabolites.cdpdag_MC_c_None.formula = 'C44H79N3O15P2'
model.metabolites.pgp_MC_c_None.formula = 'C38H73O13P2'
model.metabolites.pg_MC_c_None.formula = 'C38H74O10P1'
model.metabolites.clpn_MC_c_None.formula = 'C73H140O17P2'
model.metabolites.gdp_c.charge = -3
model.metabolites.fpram_c.charge = -1
model.metabolites.air_c.charge = -2
model.metabolites.h2s_c.charge = 0
model.metabolites.get_by_id('5mthf_c').charge = -1
model.metabolites.get_by_id('3hmrsACP_c').charge = -1
model.metabolites.get_by_id('tmrs2eACP_c').charge = -1
model.metabolites.get_by_id('3omrsACP_c').charge = -1
model.metabolites.get_by_id('ddcaACP_c').charge = -1
model.metabolites.get_by_id('udcpp_c_None').charge = -2
model.metabolites.get_by_id('pe_MC_c_None').formula = 'C37H74N1O8P1'
model.metabolites.get_by_id('pc_MC_c_None').charge = 0
model.metabolites.get_by_id('pdme_c_None').charge = 0
model.metabolites.get_by_id('pme_c_None').formula = 'C38H76N1O8P1'
model.metabolites.get_by_id('pme_c_None').charge = 0
model.metabolites.get_by_id('co_c').formula = 'CO'
model.metabolites.get_by_id('co_c').charge = 0
rxn = model.reactions.get_by_id('PMPS_c')
rxn.add_metabolites({model.metabolites.h_c:2},combine=False)
model.metabolites.get_by_id('fmn_c').charge = -2
model.metabolites.get_by_id('hhlipa_c').charge = -6
model.metabolites.get_by_id('phhlipa_c').charge = -8
model.metabolites.get_by_id('hphhlipa_c').charge = -8
model.metabolites.get_by_id('phphhlipa_c').charge = -10
model.metabolites.get_by_id('hlipa_c').charge = -6
model.metabolites.get_by_id('lipa_c').charge = -6
model.metabolites.get_by_id('kphphhlipa_c').charge = -11
model.metabolites.get_by_id('kdo2lipid4L_c').charge = -6
model.metabolites.get_by_id('myrsACP_c').charge = -1
model.metabolites.get_by_id('icolipa_c').charge = -11
model.metabolites.get_by_id('gicolipa_c').charge = -11
model.metabolites.get_by_id('gagicolipa_c').charge = -11
model.metabolites.get_by_id('ggagicolipa_c').charge = -11
model.metabolites.get_by_id('gggagicolipa_c').charge = -11
model.metabolites.get_by_id('colipa_c').charge = -11
model.metabolites.get_by_id('colipa_p').charge = -11
model.metabolites.get_by_id('colipa_e_None').charge = -11
model.metabolites.get_by_id('flxr_c').charge = -1
rxn = model.reactions.get_by_id('ANS_c')
rxn.add_metabolites({model.metabolites.anth_c:-1},combine=False)
model.metabolites.get_by_id('thfglu_c').formula = 'C24H27N8O9'
model.metabolites.get_by_id('thfglu_c').charge = -3
rxn = model.reactions.get_by_id('THFGLUS_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
model.metabolites.get_by_id('alpro_c').formula = 'CH6NS2X'
model.metabolites.get_by_id('alpro_c').charge = 1
model.metabolites.get_by_id('dhlpro_c').formula = 'H2S2X'
model.metabolites.get_by_id('dhlpro_c').charge = 0
model.metabolites.get_by_id('dna5mtc_c').formula = 'CH2'
model.metabolites.get_by_id('dnac_c').formula = ''
model.metabolites.get_by_id('asntrna_c').formula = 'C4H6N2O2R'
rxn = model.reactions.get_by_id('SELADT_c')
rxn.add_metabolites({model.metabolites.h_c:-1},combine=False)
model.metabolites.get_by_id('sel_c').charge = -2
model.metabolites.get_by_id('adsel_c').formula = 'C10H12N5O10PSe'
model.metabolites.get_by_id('adsel_c').charge = -2
model.metabolites.get_by_id('argtrna_c').charge = 2
rxn = model.reactions.get_by_id('ARGTRS_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('TYRTRS_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn.lower_bound = -1000
rxn.upper_bound = 1000
model.metabolites.get_by_id('tyrtrna_c').charge = 1
model.metabolites.get_by_id('hemeA__1_c').charge = -6
rxn = model.reactions.get_by_id('HEMEAS_c')
rxn.add_metabolites({model.metabolites.h_c:4,model.metabolites.h2o_c:-1},combine=False)
rxn.lower_bound = -1000
rxn.upper_bound = 1000
model.metabolites.get_by_id('so3_c').charge = -2
rxn = model.reactions.get_by_id('PAPR_c')
rxn.add_metabolites({model.metabolites.h_c:2},combine=False)
rxn = model.reactions.get_by_id('PAPR_c')
rxn.id = 'PAPSR_c'
model.metabolites.get_by_id('3opalmACP_c').charge = -1
model.metabolites.get_by_id('3hpalmACP_c').charge = -1
model.metabolites.get_by_id('tpalm2eACP_c').charge = -1
rxn = model.reactions.get_by_id('OCT_GAPFILLING_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
model.metabolites.get_by_id('tdecoa_c_None').charge = -4
model.metabolites.get_by_id('hpdcalACP_c_None').formula = 'C28H53N2O8PRS'
rxn = model.reactions.get_by_id('3OAR170_GAPFILLING_c')
rxn.add_metabolites({model.metabolites.h_c:-3},combine=False)
model.metabolites.get_by_id('cpoa2hcoa_c_None').formula = 'C38H64N7O17P3S'
rxn = model.reactions.get_by_id('CFACPOA2H_Lumped_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
model.metabolites.get_by_id('cpoa2h_c_None').formula = 'C17H33O2'
model.metabolites.get_by_id('cpoa2h_c_None').charge = -1
model.metabolites.get_by_id('garagund_c').charge = -2
model.metabolites.get_by_id('udpgalfur_c').charge = -2
model.metabolites.get_by_id('gfgaragund_c').charge = -2
rxn = model.reactions.get_by_id('OOR3_c')
rxn.add_metabolites({model.metabolites.h_c:1,model.metabolites.fdxrd_c:-1,model.metabolites.fdxox_c:1},combine=False)
rxn.lower_bound = -1000
rxn.id = 'OOR3r_c'
rxn.upper_bound = 1000
model.metabolites.get_by_id('fdxrd_c').charge = 0
model.metabolites.get_by_id('fdxrd_c').formula = 'Fe8S8XH2'
model.metabolites.get_by_id('fdxox_c').charge = 0
model.metabolites.get_by_id('fdxox_c').formula = 'Fe8S8X'
rxn = model.reactions.get_by_id('DES_9_c')
rxn.add_metabolites({model.metabolites.h_c:0,model.metabolites.h2o_c:2},combine=False)
rxn.id = 'DES_9_Modified_c'
rxn.name = 'Stearoyl-CoA desaturase (delta-9 desaturase) Changed cofactors to fdxrd/fdxox'
rxn = model.reactions.get_by_id('DES_9_2_c')
rxn.add_metabolites({model.metabolites.h_c:0,model.metabolites.h2o_c:2},combine=False)
rxn.id = 'DES_9_2_Modified_c'
rxn.name = 'Stearoyl-CoA desaturase (delta-9 desaturase) Changed cofactors to fdxrd/fdxox'
rxn = model.reactions.get_by_id('DES_9_Modified_c')
rxn.add_metabolites({model.metabolites.h_c:0,model.metabolites.h2o_c:3,model.metabolites.o2_c:-1.5},combine=False)
rxn = model.reactions.get_by_id('DES_9_2_Modified_c')
rxn.add_metabolites({model.metabolites.h_c:0,model.metabolites.h2o_c:3,model.metabolites.o2_c:-1.5},combine=False)
rxn = model.reactions.get_by_id('PMETM2_MC_c')
rxn.add_metabolites({model.metabolites.h_c:0,model.metabolites.h2o_c:3,model.metabolites.o2_c:-1.5},combine=False)
model.metabolites.get_by_id('pc_MC_c_None').formula = 'C40H80N1O8P1'
model.metabolites.get_by_id('codhpre6_c_None').charge = -7
model.metabolites.get_by_id('aragund_c').charge = -2
model.metabolites.get_by_id('ragund_c').charge = -2
model.metabolites.get_by_id('o16aund_c').charge = -2
model.metabolites.get_by_id('uLa4n_p').charge = 0
model.metabolites.get_by_id('acolipa_p').charge = -9
model.metabolites.get_by_id('udcpp_p').charge = -2
model.metabolites.get_by_id('acolipa_e').charge = -9
rxn = model.reactions.get_by_id('GTPOPm_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
rxn = model.reactions.get_by_id('POR_syn_c')
rxn.add_metabolites({model.metabolites.h_c:-3},combine=False)
model.metabolites.get_by_id('copre2_c').formula = 'C42H38CoN4O16'
rxn = model.reactions.get_by_id('ALATRS_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
model.metabolites.get_by_id('alatrna_c').charge = 1
for met in model.metabolites:
if met.id.endswith('trna_c'):
met.charge = 1
for rxn in model.reactions:
if rxn.id.endswith('TRS_c') and rxn.metabolites.has_key(model.metabolites.h_c):
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
model.metabolites.get_by_id('itp_c').charge = -4
rxn = model.reactions.get_by_id('NDPK9_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('NDPK6_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
model.metabolites.get_by_id('dutp_c').charge = -4
model.metabolites.get_by_id('dgtp_c').charge = -4
model.metabolites.get_by_id('dctp_c').charge = -4
model.metabolites.get_by_id('datp_c').charge = -4
rxn = model.reactions.get_by_id('NDPK5_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('NDPK2_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('NDPK3_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('NDPK1_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('NDPK7_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('SHCHCC_c')
rxn.add_metabolites({model.metabolites.scl_c:0,model.metabolites.dscl_c:-1},combine=False)
rxn = model.reactions.get_by_id('ASPO2_c')
rxn.add_metabolites({model.metabolites.h_c:-2},combine=False)
model.metabolites.get_by_id('lpro_c').formula = 'S2X'
rxn = model.reactions.get_by_id('GCCa_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
rxn = model.reactions.get_by_id('MTHFR2_c')
rxn.add_metabolites({model.metabolites.h_c:-2},combine=False)
rxn = model.reactions.get_by_id('ACPS1_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
rxn = model.reactions.get_by_id('HPROb_c')
rxn.add_metabolites({model.metabolites.h_c:-2},combine=False)
rxn = model.reactions.get_by_id('HPROa_c')
rxn.add_metabolites({model.metabolites.h_c:-2},combine=False)
rxn = model.reactions.get_by_id('PPGPPDP_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
model.metabolites.get_by_id('tsul_c').charge = -2
rxn = model.reactions.get_by_id('CYSS2_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('CYSS_trdrd_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
model.metabolites.get_by_id('tddec2eACP_c').charge = -1
rxn = model.reactions.get_by_id('MAN1PT2_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
rxn = model.reactions.get_by_id('CYSDS_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('3SALATAi_c')
rxn.add_metabolites({model.metabolites.h_c:-1},combine=False)
model.metabolites.get_by_id('3sala_c').charge = -2
rxn = model.reactions.get_by_id('ACDO_co_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
rxn = model.reactions.get_by_id('MTHFR3_c')
rxn.add_metabolites({model.metabolites.h_c:-2},combine=False)
rxn = model.reactions.get_by_id('P5CRx_c')
rxn.add_metabolites({model.metabolites.h_c:-2},combine=False)
rxn = model.reactions.get_by_id('SHCHF_c')
rxn.add_metabolites({model.metabolites.h_c:-3},combine=False)
rxn = model.reactions.get_by_id('ASPO2y_c')
rxn.add_metabolites({model.metabolites.h_c:-2},combine=False)
model.metabolites.get_by_id('2ahethmpp_c').charge = -2
model.metabolites.get_by_id('2ahethmpp_c').formula = 'C14H20N4O8P2S'
model.metabolites.get_by_id('pimACP_c').charge = -1
model.metabolites.get_by_id('butACP_c').charge = -1
model.metabolites.get_by_id('3ohexACP_c').charge = -1
model.metabolites.get_by_id('3hhexACP_c').charge = -1
model.metabolites.get_by_id('thex2eACP_c').charge = -1
model.metabolites.get_by_id('3hddecACP_c').charge = -1
model.metabolites.get_by_id('hexACP_c').charge = -1
model.metabolites.get_by_id('3ooctACP_c').charge = -1
model.metabolites.get_by_id('3oddecACP_c').charge = -1
model.metabolites.get_by_id('dcaACP_c').charge = -1
for met in model.metabolites:
if met.id.endswith('ACP_c') and met.charge == 0 and met.id != 'apoACP_c':
met.charge = -1
model.metabolites.get_by_id('xtp_c').charge = -4
rxn = model.reactions.get_by_id('APLh_c')
rxn.add_metabolites({model.metabolites.h_c:0},combine=False)
rxn = model.reactions.get_by_id('BTNC_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
rxn = model.reactions.get_by_id('LYSTRS_c')
rxn.add_metabolites({model.metabolites.trnalys_c:-1,model.metabolites.MNXM95609_c:0},combine=False)
model.metabolites.get_by_id('lystrna_c').charge = 2
model.metabolites.get_by_id('asptrna_c').charge = 0
rxn = model.reactions.get_by_id('FMETTRS_c')
rxn.add_metabolites({model.metabolites.h_c:-1},combine=False)
model.metabolites.get_by_id('fmettrna_c').charge = 0
model.metabolites.get_by_id('argtrna_c').charge = 2
model.metabolites.get_by_id('glutrna_c').charge = 0
rxn = model.reactions.get_by_id('GLNTRAT_c')
rxn.add_metabolites({model.metabolites.h_c:1},combine=False)
#model.reactions.get_by_id('ASNTRAT_c').check_mass_balance()
#superprint('ASNTRAT_c')
essential_reactions = model.essential_reactions()
unbalanced_list = find_unbalanced_reactions(model)
check_and_fix = [x.id for x in essential_reactions if x.id in unbalanced_list[0]]
for x in check_and_fix:
print x,unbalanced_list[0][x]
for x in unbalanced_list[0].keys():
print x,unbalanced_list[0][x]
###Output
MNXR84768_c {'C': 10.0, 'H': 11.0, 'charge': -1.0, 'O': 6.0, 'N': 5.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR84803_c {'C': 0, 'H': 2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85102_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR73874_c {'C': 0, 'H': -1.0, 'charge': -2.0, 'O': 3.0, 'N': 0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR73876_c {'C': 0, 'H': -1.0, 'charge': -2.0, 'O': 3.0, 'N': 0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR73685_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR59619_c {'C': 0, 'H': 0, 'charge': 0, 'O': -1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR59368_c {'C': 6.0, 'H': 11.0, 'charge': 1.0, 'O': 5.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR74049_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85318_c {'C': 0, 'H': 7.0, 'charge': 7.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR59113_c {'C': 5.0, 'H': 7.0, 'charge': 0, 'O': 1.0, 'N': 1.0, 'P': 0, 'S': 0, 'R': 0}
MNXR70768_c {'C': 0, 'H': 2.0, 'charge': -4.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR14585_c {'C': 10.0, 'H': 12.0, 'charge': -1.0, 'O': 7.0, 'N': 2.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR73725_c {'C': 6.0, 'H': 10.0, 'charge': 0, 'O': 5.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85100_c {'C': 0, 'H': 1.0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR14583_c {'C': 10.0, 'H': 11.0, 'charge': -1.0, 'O': 6.0, 'N': 5.0, 'P': 1.0, 'S': 0, 'R': 0}
DM_4hba_c {'C': -7.0, 'H': -8.0, 'charge': 0, 'O': -2.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PFOR_c {'C': 0, 'H': 0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR10316_c {'C': 0, 'H': -3.0, 'charge': -3.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR74431_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR17319_c {'C': -5.0, 'H': -5.0, 'charge': -1.0, 'O': -1.0, 'N': -1.0, 'P': 1.0, 'S': 1.0, 'R': 0}
MNXR75823_c {'C': 11.0, 'H': 20.0, 'charge': -1.0, 'O': 7.0, 'N': 2.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR84567_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR18370_c {'C': -16.0, 'H': -22.0, 'charge': 0, 'O': -1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR17479_c {'C': 0, 'H': 3.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR76933_c {'C': 0, 'H': 3.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
DM_doxopa_c {'C': -3, 'H': -2, 'charge': 0, 'O': -4, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR86013_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR59223_c {'C': -5.0, 'H': -8.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': -1.0}
MNXR17476_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR75760_c {'C': 0, 'H': 1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84774_c {'C': 9.0, 'H': 10.0, 'charge': -1.0, 'O': 8.0, 'N': 2.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR74048_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR36005_c {'C': -5.0, 'H': -7.0, 'charge': 0, 'O': -3.0, 'N': -1.0, 'P': 0, 'S': 0, 'R': 0}
MNXR74164_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84891_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84814_c {'C': 0, 'H': -6.0, 'charge': -6.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
POR_syn_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85631_c {'C': -8.0, 'H': -14.0, 'charge': 0, 'O': -1.0, 'N': -1.0, 'P': 0, 'S': 0, 'R': -1.0, 'X': 1.0}
MNXR85427_c {'C': 0, 'H': 1.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR74084_c {'C': -6.0, 'H': -13.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR86014_c {'C': 11.0, 'H': 19.0, 'charge': -2.0, 'O': 7.0, 'N': 2.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR84961_c {'C': 8.0, 'H': 15.0, 'charge': 0, 'O': 1.0, 'N': 1.0, 'P': 0, 'S': 0, 'R': 1.0, 'X': -1.0}
MNXR85460_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
ACCOAC_1_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MC_Average_FattyAcid_c {'C': -0.040999999999999426, 'H': -0.05500000000000271, 'charge': -0.0010000000000001154, 'O': 0.0020000000000002308, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84766_c {'C': -10.0, 'H': -11.0, 'charge': 1.0, 'O': -6.0, 'N': -5.0, 'P': -1.0, 'S': 0, 'R': 0}
MNXR84773_c {'C': -9.0, 'H': -11.0, 'charge': 1.0, 'O': -7.0, 'N': -3.0, 'P': -1.0, 'S': 0, 'R': 0}
MNXR85295_c {'C': 7.0, 'H': 3.0, 'charge': 0, 'O': -4.0, 'N': 0, 'P': -2.0, 'S': 0, 'R': 0}
MNXR17318_c {'C': 8.0, 'H': 12.0, 'charge': 0, 'O': 6.0, 'N': 0, 'P': 0, 'S': -1.0, 'R': 0}
MNXR84930_c {'C': 0, 'H': -2.0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR17778_c {'C': 0, 'H': 2.0, 'charge': 0, 'O': -1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85919_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': -2.0, 'N': 0, 'P': 0, 'S': 2.0, 'R': 0}
MNXR84997_c {'C': 6.0, 'H': 10.0, 'charge': 0, 'O': 5.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR73718_c {'C': -86.0, 'H': -143.0, 'charge': -1.0, 'O': -9.0, 'N': 0, 'P': -1.0, 'S': 0, 'R': 0}
MNXR70905_c {'C': 0, 'H': 0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR79218_c {'C': 0, 'H': 0, 'charge': 4.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84771_c {'C': -9.0, 'H': -11.0, 'charge': 1.0, 'O': -7.0, 'N': -3.0, 'P': -1.0, 'S': 0, 'R': 0}
EX_cobalt2_c {'C': 0, 'Co': -1.0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR86022_c {'C': 0, 'H': 0, 'charge': 4.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR70855_c {'C': 0, 'H': 0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR18597_c {'C': 0, 'H': 0, 'charge': 0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR73920_c {'C': 8.0, 'H': 15.0, 'charge': 1.0, 'O': 1.0, 'N': 1.0, 'P': 0, 'S': 0, 'R': 1.0, 'X': -1.0}
MNXR59708_c {'C': 5.0, 'H': 10.0, 'charge': 1.0, 'O': 1.0, 'N': 1.0, 'P': 0, 'S': 0, 'R': 1.0, 'Se': 1.0}
MNXR21510_c {'C': 11.0, 'H': 20.0, 'charge': -1.0, 'O': 7.0, 'N': 2.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR15996_c {'C': 0, 'H': 1.0, 'charge': -3.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR74047_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85991_c {'C': 0, 'H': -2.0, 'charge': 0, 'O': -1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84837_c {'C': 0, 'H': -2.0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
BTNC_c {'C': 0, 'H': 0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85034_c {'C': 8.0, 'H': 14.0, 'charge': 0, 'O': 1.0, 'N': 1.0, 'P': 0, 'S': 0, 'R': 1.0, 'X': -1.0}
MNXR16250_c {'C': 0, 'H': 0, 'charge': 0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR73792_c {'C': -2.0, 'H': -4.0, 'charge': -1.0, 'O': -1.0, 'N': -1.0, 'P': 0, 'S': 0, 'R': -1.0}
MNXR84802_c {'C': 0, 'H': -11.0, 'charge': -5.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
SUCOAS1m_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR59754_c {'C': 1.0, 'H': -1.0, 'charge': 0, 'O': 2.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR60918_c {'C': 0, 'H': 1.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85086_c {'C': 1.0, 'H': 2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84770_c {'C': 10.0, 'H': 11.0, 'charge': -1.0, 'O': 7.0, 'N': 5.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR61040_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84772_c {'C': -10.0, 'H': -11.0, 'charge': 1.0, 'O': -7.0, 'N': -5.0, 'P': -1.0, 'S': 0, 'R': 0}
MNXR74445_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR18599_c {'C': 0, 'H': 0, 'charge': 1.0, 'O': 1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
ASNTRAT_c {'C': 0, 'H': 0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR75060_c {'C': -2.0, 'H': 2.0, 'charge': 0, 'O': -2.0, 'N': 0, 'P': 0, 'S': 0, 'R': -2.0}
MNXR59801_c {'C': -6.0, 'H': -10.0, 'charge': 0, 'O': -5.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR57061_c {'C': 0, 'H': -4.0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR84769_c {'C': -9.0, 'H': -10.0, 'charge': 1.0, 'O': -8.0, 'N': -2.0, 'P': -1.0, 'S': 0, 'R': 0}
MNXR26440_c {'C': 0, 'H': -5.0, 'charge': -5.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR60921_c {'C': 19.0, 'H': 26.0, 'charge': 0, 'O': 4.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85646_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR14582_c {'C': 10.0, 'H': 11.0, 'charge': -1.0, 'O': 5.0, 'N': 5.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR59323_c {'C': -6.0, 'H': -10.0, 'charge': 0, 'O': -5.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR75548_c {'C': 0, 'H': 1.0, 'charge': 1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85317_c {'C': 0, 'H': -30.0, 'charge': -22.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR8683_c {'C': 0, 'H': -3.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
PMETM2_MC_c {'C': 0, 'H': 5.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR61295_c {'C': -12.0, 'H': -22.0, 'charge': 0, 'O': -11.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR14584_c {'C': 9.0, 'H': 11.0, 'charge': -1.0, 'O': 6.0, 'N': 3.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR73919_c {'C': 8.0, 'H': 14.0, 'charge': 0, 'O': 1.0, 'N': 1.0, 'P': 0, 'S': 0, 'R': 1.0, 'X': -1.0}
MNXR59755_c {'C': 1.0, 'H': 2.0, 'charge': 0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR19023_c {'C': 0, 'H': 2.0, 'charge': 1.0, 'O': -1.0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85655_c {'C': 0, 'H': 0, 'charge': 2.0, 'O': 0, 'N': 0, 'P': 0, 'S': -2.0, 'R': 1.0}
MNXR85044_c {'C': 0, 'H': -1.0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR61128_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR85656_c {'C': 11.0, 'H': 20.0, 'charge': 1.0, 'O': 7.0, 'N': 2.0, 'P': 1.0, 'S': -2.0, 'R': 0}
MNXR61041_c {'C': 0, 'H': 0, 'charge': -2.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
MNXR75718_c {'C': 11.0, 'H': 19.0, 'charge': -2.0, 'O': 7.0, 'N': 2.0, 'P': 1.0, 'S': 0, 'R': 0}
MNXR84920_c {'C': 0, 'H': 0, 'charge': -1.0, 'O': 0, 'N': 0, 'P': 0, 'S': 0, 'R': 0}
###Markdown
Test if the ratios have improved
###Code
model.objective.expression
model.objective.direction = 'max'
model.objective = model.reactions.get_by_id('BIOMASS_REACTION')
solution = show_uptake_excretion(model,model.reactions.get_by_id('BIOMASS_REACTION'))
###Output
nh3_in 7.38136038756
so4_in 0.0585567047958
pi_in 4.17949182777
h_in 16.560573946
h2o_in 955.288659916
o2_in 27.615468895
h_out 1000.0
h2o_out 1000.0
cbp_out 3.82532919182
cmp_out 0.0779734538858
pap_out 0.0585567047958
MNXM4297_out 0.0972549619372
ch4_in 18.405
BIOMASS_REACTION 0.333106006006
###Markdown
They haven't improved, and the model now produces two side products that it shouldn't:

- cmp_out 0.0779734538856
- pap_out 0.0585567047956

Below I am trying to find and fix MNX reactions that carried flux (or looked suspicious). All core reactions should have either been manually curated or imported from BiGG, so MNXR reactions are either lumped reactions or parallel reactions with similar chemical substrate groups or cofactors.
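Several of the fixes below use an `invert_reaction` helper that was defined earlier in the notebook. As a point of reference, a minimal sketch of what such a helper might look like is given here; this is an assumption about its behaviour (it only flips substrates and products, and bounds are adjusted manually in the cells below where needed):

```python
def invert_reaction(rxn):
    # Negate every stoichiometric coefficient so that products and
    # substrates switch sides; add_metabolites with the default
    # combine=True turns each coefficient c into c - 2*c = -c.
    rxn.add_metabolites({met: -2 * coeff for met, coeff in rxn.metabolites.items()})
```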
###Code
[(k,v) for k,v in solution.fluxes.items() if v >= 0.01 and k.startswith('MNXR')]
rxn = model.reactions.MNXR56251_c
print rxn
model.remove_reactions([rxn])
rxn = model.reactions.IPPMIa_c
rxn.upper_bound = 1000
rxn.lower_bound = -1000
rxn = model.reactions.IPPMIb_c
rxn.upper_bound = 1000
rxn.lower_bound = -1000
rxn = model.reactions.MNXR6217_c
print rxn
model.remove_reactions([rxn])
rxn = model.reactions.MNXR5662_c
print rxn
model.remove_reactions([rxn])
rxn = model.reactions.IPMD_c
print rxn
invert_reaction(rxn)
print rxn
###Output
IPMD_c: 3c4mop_c + h_c + nadh_c --> 3c2hmp_c + nad_c
IPMD_c: 3c2hmp_c + nad_c --> 3c4mop_c + h_c + nadh_c
###Markdown
It turns out that the tetrahydromethanopterin (H4MPT) pathway for oxidizing formaldehyde hadn't been mapped yet. BiGG doesn't have a concise mapping for this pathway, so the IDs were adapted from De la Torre et al.
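Instead of renaming each object by hand, the same mapping can be applied from a small dictionary; the sketch below only illustrates the pattern, using ID pairs taken from the cells that follow:

```python
# Illustrative only: map MNX reaction IDs to the adapted BiGG-style IDs.
rxn_id_map = {'MNXR17799_c': 'FAE_c', 'MNXR17800_c': 'MTDB_c', 'MNXR5519_c': 'MCH_c'}
for old_id, new_id in rxn_id_map.items():
    model.reactions.get_by_id(old_id).id = new_id
model.repair()  # rebuild the model's id index so the new ids are addressable
```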
###Code
rxn = model.reactions.MNXR17799_c
print rxn
rxn.id ='FAE_c'
rxn.name = '5,6,7,8-tetrahydromethanopterin hydro-lyase'
met = model.metabolites.MNXM667_c
met.id = '510mh4mpt_c'
met.name = '5,10-methylene-tetrahydromethanopterin'
rxn = model.reactions.MNXR17800_c
print rxn
rxn.id ='MTDB_c'
rxn.name = 'nad(p)-dependent methylene h4mpt dehydrogenase'
met = model.metabolites.MNXM809_c
met.id = '510methmpt_c'
met.name = '5,10-methenyltetrahydromethanopterin'
rxn = model.reactions.MNXR5519_c
print rxn
rxn.id ='MCH_c'
rxn.name = 'n(5),n(10)-methenyltetrahydromethanopterin cyclohydrolase'
met = model.metabolites.MNXM1436_c
met.id = '5fthmpt_c'
met.name = '5-formyl-tetrahydromethanopterin'
rxn = model.reactions.MNXR6072_c
print rxn
rxn.id = 'FTR_c'
rxn.name = 'formylmethanofuran-tetrahydromethanopterin formyltransferase'
met = model.metabolites.MNXM1050_c
met.id = 'mfr_c'
met.name = 'methanofuranate'
met = model.metabolites.MNXM1087_c
met.id = 'formmfr_c'
met.name = 'N-formylmethanofuran'
rxn = model.reactions.MNXR17801_c
print rxn
rxn.id = 'FMFRD_c'
rxn.name = 'formylmethanofuran dehydrogenase'
###Output
MNXR17801_c: formmfr_c + h2o_c --> for_c + mfr_c
###Markdown
For reactions that had no BiGG ID I tried to come up with one.
###Code
rxn = model.reactions.MNXR5732_c
print rxn
rxn.id ='LLEUDr_c'
rxn.name = 'leucine dehydrogenase'
rxn.notes['BIGG'] = 'LLEUDr'
rxn = model.reactions.MNXR79509_c
print rxn
invert_reaction(rxn)
print rxn
rxn.id ='PKL_c'
rxn.name = 'Phosphoketolase'
rxn.notes['COFACTOR'] = '1 Thiamin Diphosphate'
rxn = model.reactions.MNXR5933_c
print rxn
rxn.id ='HPS_c'
rxn.name = '3-hexulose-6-phosphate synthase'
rxn.notes['COFACTOR'] = 'Mg2+ or Mn2+'
met = model.metabolites.MNXM1659_c
met.id = 'ah6p__D_c'
met.name = 'Arabino-3-hexulose-6-P'
met.notes['BIGG'] = 'ah6p__D'
rxn = model.reactions.MNXR85335_c
print rxn
rxn.id ='PHI_c'
rxn.name = '3-hexulose-6-phosphate isomerase'
rxn = model.reactions.MNXR1417_c
print rxn
invert_reaction(rxn)
print rxn
rxn.lower_bound = 0
rxn.upper_bound = 0
rxn = model.reactions.MNXR84805_c
print rxn
invert_reaction(rxn)
print rxn
rxn.id ='NO3R1bpp_c'
rxn.name = 'Nitrate reductase (Ubiquinol-8)'
rxn.add_metabolites({model.metabolites.MNXM24_c:0,
model.metabolites.MNXM35_c:0,
model.metabolites.q8h2_im:-1,
model.metabolites.q8_im:1
},combine=False)
rxn = model.reactions.MNXR4097_c
print rxn
rxn.lower_bound = 0
rxn.upper_bound = 0
rxn = model.reactions.MNXR8072_c
print rxn
rxn.id ='PHEPYRTA_c'
rxn.name = 'phenylalanine:pyruvate aminotransferase'
rxn = model.reactions.MNXR26374_c
print rxn
model.remove_reactions([rxn])
rxn = model.reactions.MNXR14818_c
print rxn
invert_reaction(rxn)
print rxn
rxn.id = 'VALDHr_c'
rxn.name = 'Valine dehydrogenase'
rxn.notes['BIGG'] = 'VALDHr'
rxn = model.reactions.MNXR56274_c
print rxn
model.remove_reactions([rxn])
rxn = model.reactions.MNXR84844_c
print rxn
invert_reaction(rxn)
print rxn
rxn.id = 'PROD2_c'
rxn.name = 'Proline dehydrogenase'
rxn.notes['BIGG'] = 'PROD2'
rxn.add_metabolites({model.metabolites.MNXM24_c:0,
model.metabolites.MNXM35_c:0,
model.metabolites.fad_c:-1,
model.metabolites.fadh2_c:1
},combine=False)
rxn = model.reactions.MNXR14750_c
print rxn
rxn.id = 'AHMT'
rxn.name = 'D-alanine 2-hydroxymethyltransferase'
print rxn
invert_reaction(rxn)
print rxn
rxn.lower_bound = 0
rxn.upper_bound = 1000
met = model.metabolites.MNXM4297_c
met.id = 'mser__L_c'
met.name = '2-methyl-L-serine'
rxn = model.reactions.MNXR73445_c
print rxn
rxn.lower_bound = 0
rxn.upper_bound = 1000
print rxn
###Output
MNXR73445_c: h2o_c + o2_c + tyr__L_c <=> 34hpp_c + h2o2_c + nh3_c
MNXR73445_c: h2o_c + o2_c + tyr__L_c --> 34hpp_c + h2o2_c + nh3_c
###Markdown
Removal of MNX reactions from the list of essential reactions in addition to finding loops.
###Code
solution = show_uptake_excretion(model,model.reactions.get_by_id('BIOMASS_REACTION'))
[(k,v) for k,v in solution.fluxes.items() if v >= 900], len([(k,v) for k,v in solution.fluxes.items() if v >= 900])
rxn = model.reactions.ALATA_L_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.GLYTA_c
print rxn
rxn.lower_bound = -1000
print rxn
rxn = model.reactions.AGT_c
print rxn
invert_reaction(rxn)
rxn.lower_bound = -1000
print rxn
rxn = model.reactions.ACITL_c
print rxn
rxn.lower_bound = 0
print rxn
rxn = model.reactions.PFK_ppi_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.DADNK_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.DHFR_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.ASNS2_c
print rxn
invert_reaction(rxn)
rxn.lower_bound = 0
print rxn
rxn = model.reactions.ASNN_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.ACKr_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.ASPTA_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.ILETA_c
print rxn
invert_reaction(rxn)
rxn.lower_bound = -1000
print rxn
rxn = model.reactions.GLYCTO1_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.ORNTAC_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.PANTS_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.GTPOPm_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.GLUKA_c
print rxn
rxn.lower_bound = -1000
invert_reaction(rxn)
print rxn
rxn = model.reactions.ACS_c
print rxn
rxn.lower_bound = 0
invert_reaction(rxn)
print rxn
rxn = model.reactions.MNXR74047_c
print rxn
essential_reactions = model.essential_reactions()
[x for x in essential_reactions if x.id.startswith('MNXR')]
rxn = model.reactions.MNXR79510_c
print rxn
rxn.id = 'TKT1_c'
rxn.name = 'Transketolase'
rxn.notes['BIGG'] = 'TKT1'
print rxn
rxn = model.reactions.MNXR6078_c
print rxn
rxn.id = 'PAH_c'
rxn.name = '(R)-pantothenate amidohydrolase, Panthothenase'
print rxn
rxn = model.reactions.MNXR74047_c
print rxn
rxn = model.reactions.PFK_adp_c
print rxn
invert_reaction(rxn)
print rxn
rxn.lower_bound = 0
rxn = model.reactions.PFK_2_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.CTPS1_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.CYTK1_c
print rxn
rxn.lower_bound = -1000
print rxn
Unused_BM_rxns = [x for x in model.reactions if x.id.endswith('_bm')]
model.remove_reactions(Unused_BM_rxns,remove_orphans=True)
model.metabolites.atp_bm.remove_from_model()
model.metabolites.adp_bm.remove_from_model()
model.metabolites.pi_bm.remove_from_model()
###Output
_____no_output_____
###Markdown
Removal of unreported side-products
###Code
rxn = Reaction('BPNT_c')
rxn.add_metabolites({model.metabolites.h2o_c:-1,
model.metabolites.pap_c:-1,
model.metabolites.amp_c:1,
model.metabolites.pi_c:1})
rxn.name = '3,5-bisphosphate nucleotidase'
rxn.id = 'BPNT_c'
rxn.notes = {'RHEA':[10040,10041,10042,10043],'KEGG':['R00188'],'MXNREF':['MNXR965'],'EC NUMBER': ['3.1.3.7'],
'GENE ASSOCIATION':['MCA2983'],'METACYC':['META:325-BISPHOSPHATE-NUCLEOTIDASE-RXN'],'BIGG': ['BPNT']}
rxn.gene_reaction_rule = 'MCA2983'
print rxn.notes
model.add_reaction(rxn)
###Output
_____no_output_____
###Markdown
Nitrogen Metabolism Corrected/Added

In Type I methanotrophs grown on medium containing ammonia, the **reductive amination of pyruvate (via alanine dehydrogenase, in M. capsulatus Bath)** or **of alpha-ketoglutarate (via glutamate dehydrogenase)** was prevalent under high-ammonia growth conditions. In contrast, when grown under ammonium limitation (<0.5 mM) or on medium containing nitrate (in the absence of ammonium), these methanotrophs assimilated ammonia via the **glutamate cycle**. **Four predicted ammonium transporters** have been identified in the genome of M. capsulatus Bath (Murrell and Dalton, 1983a; Trotsenko and Murrell, 2008).

Type II (alphaproteobacterial) methanotrophs use the glutamate cycle and the enzymes glutamine synthetase (GS) and glutamine-oxoglutarate amidotransferase (GOGAT, also known as glutamate synthase).

http://methanotroph.org/wiki/metabolic-pathways/

---------------------

Ammonia assimilation was studied using continuous cultures of three obligate methanotrophs. The type X organism, Methylococcus capsulatus (Bath), assimilated ammonia during growth on dinitrogen or nitrate via the glutamine synthetase/glutamate synthase pathway but utilized the alanine dehydrogenase pathway when grown in the presence of excess ammonia. Repression and derepression of these ammonia assimilation enzymes was demonstrated during the switch-over of continuous cultures from nitrogen-free (N2-fixing) medium to medium containing high concentrations of ammonia. The properties of alanine dehydrogenase and glutamate synthase in this organism are discussed.

Murrell, J. C., & Dalton, H. (1983). Ammonia Assimilation in Methylococcus capsulatus (Bath) and Other Obligate Methanotrophs. Journal of General Microbiology, 129(1983), 1197–1206. doi:10.1099/00221287-129-4-1197
###Code
# Alanine Dehydrogenase inversion
rxn = model.reactions.ALAD__L_c
print rxn
invert_reaction(rxn)
print rxn
# Glutamate Cycle
rxn = model.reactions.GLNS_c
print rxn
invert_reaction(rxn)
print rxn
rxn = model.reactions.GLUDx_c
print rxn
invert_reaction(rxn)
print rxn
###Output
GLUDx_c: akg_c + h_c + nadh_c + nh3_c --> glu__L_c + h2o_c + nad_c
GLUDx_c: glu__L_c + h2o_c + nad_c --> akg_c + h_c + nadh_c + nh3_c
###Markdown
Oxidation of Ammonia
soluble MMO (functioning as an AMO)
###Code
rxn = Reaction('AMOs')
rxn.name = 'soluble ammonia monooxygenase'
rxn.subsystem = 'ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments'
rxn.notes['SUBSYSTEM'] = ['ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments']
rxn.lower_bound = 0.
rxn.upper_bound = 1000.
# Confidence Score
rxn.notes.update({'CONFIDENCE SCORE':['4']})
# Localization
rxn.notes.update({'LOCALIZATION':['Cytosol']})
# EC number from KEGG
rxn.notes.update({'EC Number':['1.14.99.39']})
# Gene-Reaction-Rule update based on KEGG (using the old locus tags for now)
rxn.notes.update({'GENE ASSOCIATION': ['( MCA1194 and MCA1195 and MCA1198 and MCA1196 and MCA1200 and MCA1202 and MCA1205 )']})
rxn.gene_reaction_rule = '( MCA1194 and MCA1195 and MCA1198 and MCA1196 and MCA1200 and MCA1202 and MCA1205 )'
# Substrate and cofactor usage from BRENDA
rxn.notes.update({'COFACTOR':['dinuclear FeIV cluster, NADH']})
# NADH - As suggested by 10.1146/annurev.biochem.76.061505.175355
# Inhibitor from BRENDA
rxn.notes.update({'INHIBITOR':['Cu2+']})
# Check if RXN is mass and charge balanced!
print (rxn.check_mass_balance())
# Check RXN-Directionality
print rxn
# Update ID to BiGG
rxn.id = 'AMOs_c'
rxn.add_metabolites({model.metabolites.nh3_c: -1.0,
model.metabolites.o2_c: -1.0,
model.metabolites.nadh_c: -1.0,
model.metabolites.ham_c: 1.0,
model.metabolites.h2o_c: 1.0,
model.metabolites.nad_c: 1.0})
model.add_reaction(rxn)
###Output
{}
AMOs
###Markdown
particulate MMO (functioning as an AMO)
###Code
# Add nitric oxide-Metabolite in the Periplasm
no_p = model.metabolites.no_c.copy()
no_p.compartment = 'p'
no_p.id = 'no_p'
# Add Dinitrogen Oxide-Metabolite in the Periplasm
n2o_p = model.metabolites.n2o_c.copy()
n2o_p.compartment = 'p'
n2o_p.id = 'n2o_p'
# Add Nitrate-Metabolite in the Periplasm
no3_p = model.metabolites.no3_c.copy()
no3_p.compartment = 'p'
no3_p.id = 'no3_p'
# Add Ammonia-Metabolite in the Periplasm
nh3_p = model.metabolites.nh3_c.copy()
nh3_p.compartment = 'p'
nh3_p.id = 'nh3_p'
# Add Hydroxylamine-Metabolite in the Periplasm
ham_p = model.metabolites.ham_c.copy()
ham_p.compartment = 'p'
ham_p.id = 'ham_p'
model.add_metabolites([nh3_p,ham_p,no_p,no3_p,n2o_p])
rxn = Reaction('AMOp')
rxn.name = 'particulate ammonia monooxygenase'
rxn.subsystem = 'ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments'
rxn.notes['SUBSYSTEM'] = ['ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments']
rxn.lower_bound = 0.
rxn.upper_bound = 1000.
# Confidence Score
rxn.notes.update({'CONFIDENCE SCORE':['4']})
# Localization
rxn.notes.update({'LOCALIZATION':['Inner Membrane']})
# EC number from KEGG
rxn.notes.update({'EC Number':['1.14.99.39']})
# Gene-Reaction-Rule update based on KEGG (using the old locus tags for now)
rxn.notes.update({'GENE ASSOCIATION': ['(((MCA1796 and MCA1797 and MCA1798) or (MCA2853 and MCA2854 and MCA2855)) and MCA0295)']})
rxn.gene_reaction_rule = '(((MCA1796 and MCA1797 and MCA1798) or (MCA2853 and MCA2854 and MCA2855)) and MCA0295)'
# Substrate and cofactor usage from BRENDA
rxn.notes.update({'COFACTOR':['2 Cu2+, 1-2 Fe']})
# NADH - As suggested by 10.1146/annurev.biochem.76.061505.175355
# Inhibitor from BRENDA
rxn.notes.update({'INHIBITOR':['']})
# Check if RXN is mass and charge balanced!
print (rxn.check_mass_balance())
# Check RXN-Directionality
print rxn
# Update ID to BiGG
rxn.id = 'AMOp_im'
rxn.add_metabolites({model.metabolites.nh3_p: -1.0,
model.metabolites.o2_p: -1.0,
model.metabolites.q8h2_im: -1.0,
model.metabolites.ham_p: 1.0,
model.metabolites.h2o_p: 1.0,
model.metabolites.q8_im: 1.0})
model.add_reaction(rxn)
###Output
{}
AMOp
###Markdown
Ammonia, Hydroxylamine, and NO2 Diffusion
###Code
rxn = Reaction('NH3_im')
rxn.name = 'Diffusion NH3 between Periplasm and Cytosol'
rxn.subsystem = 'Diffusion'
rxn.notes['SUBSYSTEM'] =['Diffusion']
rxn.lower_bound = -1000.
rxn.upper_bound = 1000.
rxn.objective_coefficient = 0.
rxn.add_metabolites({model.metabolites.nh3_p: -1.0,
model.metabolites.nh3_c: 1.0})
# Confidence Score:
rxn.notes.update({'CONFIDENCE SCORE':['1']})
# Localization
rxn.notes.update({'LOCALIZATION':['Inner Membrane']})
# Check if RXN is mass and charge balanced!
print (rxn.check_mass_balance())
# Check RXN-Directionality
print rxn
# Update ID to BiGG
rxn.id = 'NH3_im'
model.add_reaction(rxn)
rxn = Reaction('HAM_im')
rxn.name = 'Diffusion Hydroxylamine between Periplasm and Cytosol'
rxn.subsystem = 'Diffusion'
rxn.notes['SUBSYSTEM'] =['Diffusion']
rxn.lower_bound = -1000.
rxn.upper_bound = 1000.
rxn.objective_coefficient = 0.
rxn.add_metabolites({model.metabolites.ham_p: -1.0,
model.metabolites.ham_c: 1.0})
# Confidence Score:
rxn.notes.update({'CONFIDENCE SCORE':['1']})
# Localization
rxn.notes.update({'LOCALIZATION':['Inner Membrane']})
# Check if RXN is mass and charge balanced!
print (rxn.check_mass_balance())
# Check RXN-Directionality
print rxn
# Update ID to BiGG
rxn.id = 'HAM_im'
model.add_reaction(rxn)
# Add Nitrite-Metabolite in the Periplasm
no2_p = model.metabolites.no2_c.copy()
no2_p.compartment = 'p'
no2_p.id = 'no2_p'
model.add_metabolites([no2_p])
rxn = Reaction('NO2_im')
rxn.name = 'Diffusion of Nitrite between Periplasm and Cytosol'
rxn.subsystem = 'Diffusion'
rxn.notes['SUBSYSTEM'] =['Diffusion']
rxn.lower_bound = -1000.
rxn.upper_bound = 1000.
rxn.objective_coefficient = 0.
rxn.add_metabolites({model.metabolites.no2_p: -1.0,
model.metabolites.no2_c: 1.0})
# Confidence Score:
rxn.notes.update({'CONFIDENCE SCORE':['1']})
# Localization
rxn.notes.update({'LOCALIZATION':['Inner Membrane']})
# Check if RXN is mass and charge balanced!
print (rxn.check_mass_balance())
# Check RXN-Directionality
print rxn
# Update ID to BiGG
rxn.id = 'NO2_im'
model.add_reaction(rxn)
###Output
{}
NO2_im
###Markdown
Cytochrome P-460
###Code
rxn = Reaction('CYP460')
rxn.name = 'Cytochrome P460 - Hydroxylamine Dehydrogenase'
rxn.subsystem = 'ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments'
rxn.notes['SUBSYSTEM'] = ['ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments']
rxn.lower_bound = 0.
rxn.upper_bound = 1000.
# Confidence Score
rxn.notes.update({'CONFIDENCE SCORE':['4']})
# Localization
rxn.notes.update({'LOCALIZATION':['Inner Membrane']})
# EC number from KEGG
rxn.notes.update({'EC Number':['1.7.2.6']})
# Gene-Reaction-Rule update based on KEGG (using the old locus tags for now)
rxn.notes.update({'GENE ASSOCIATION': ['MCA0524']})
rxn.gene_reaction_rule = 'MCA0524'
# Substrate and cofactor usage from BRENDA
rxn.notes.update({'COFACTOR':['1 Cu2+, 1 Fe']})
# NADH - As suggested by 10.1146/annurev.biochem.76.061505.175355
# Inhibitor from BRENDA
rxn.notes.update({'INHIBITOR':['']})
# Check if RXN is mass and charge balanced!
print (rxn.check_mass_balance())
# Check RXN-Directionality
print rxn
# Update ID to BiGG
rxn.id = 'CYP460_im'
rxn.add_metabolites({model.metabolites.h_p: 5.0,
model.metabolites.no2_p: 1.0,
model.metabolites.ficytcc555_p: -2.0,
model.metabolites.ham_p: -1.0,
model.metabolites.h2o_p: -1.0,
model.metabolites.focytcc555_p: 2.0})
model.add_reaction(rxn)
###Output
{}
CYP460
###Markdown
HAO
Cofactor unknown.
###Code
rxn = Reaction('HAO')
rxn.name = 'Hydroxylamine oxydoreductase'
rxn.subsystem = 'ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments'
rxn.notes['SUBSYSTEM'] = ['ec00910:Nitrogen metabolism,ec01120:Microbial metabolism in diverse environments']
rxn.lower_bound = 0.
rxn.upper_bound = 0.
# Confidence Score
rxn.notes.update({'CONFIDENCE SCORE':['4']})
# Localization
rxn.notes.update({'LOCALIZATION':['Inner Membrane']})
# EC number from KEGG
rxn.notes.update({'EC Number':['1.7.3.4']})
# Gene-Reaction-Rule update based on KEGG (using the old locus tags for now)
rxn.notes.update({'GENE ASSOCIATION': ['MCA0955 and MCA0956']})
rxn.gene_reaction_rule = 'MCA0955 and MCA0956'
# Substrate and cofactor usage from BRENDA
rxn.notes.update({'COFACTOR':['1 Fe']})
# NADH - As suggested by 10.1146/annurev.biochem.76.061505.175355
# Inhibitor from BRENDA
rxn.notes.update({'INHIBITOR':['']})
# Check if RXN is mass and charge balanced!
print (rxn.check_mass_balance())
# Check RXN-Directionality
print rxn
# Update ID to BiGG
rxn.id = 'HAO_im'
rxn.add_metabolites({model.metabolites.h_p: 5.0,
model.metabolites.no2_p: 1.0,
model.metabolites.ficytcc555_p: -2.0,
model.metabolites.ham_p: -1.0,
model.metabolites.h2o_p: -1.0,
model.metabolites.focytcc555_p: 2.0})
model.add_reaction(rxn)
###Output
{}
HAO
###Markdown
Reduction of Nitrate and Nitrite (Denitrification)

nirD/nirB present in the reactions NTRIR2x_c, NTRIR2y_c = EC 1.7.1.4 (assimilatory nitrite reductase)
nasA = nitrate reductase, EC 1.7.99.4

Removed all reactions that had a cofactor other than ubiquinol.
###Code
rxn = model.reactions.NO3R1bpp_c
rxn.add_metabolites({model.metabolites.no2_p: 1,
model.metabolites.no2_c: 0,
model.metabolites.no3_c: 0,
model.metabolites.h2o_c: 0,
model.metabolites.no3_p: -1,
model.metabolites.h2o_p: 1,
model.metabolites.q8h2_im: -1,
model.metabolites.MNXM35_c: 0,
model.metabolites.MNXM24_c: 0,
model.metabolites.q8_im: 1},combine=False)
print rxn
rxn.id = 'NO3R1_im'
rxn.name = 'Nitrate reductase (Ubiquinol-8)'
rxn.notes['MXNREF'] = ['MNXR55771']
rxn.notes['RHEA'] = ['29147', '29148', '29149', '29150']
model.remove_reactions([model.reactions.NITR_c,model.reactions.MNXR84803_c,model.reactions.MNXR6576_c])
###Output
_____no_output_____
###Markdown
Nitric-oxide reductase
Removed all instances that had a cofactor other than Cytochrome H.
###Code
model.remove_reactions([model.reactions.MNXR19023_c,model.reactions.NHFRBO_c,
model.reactions.MNXR9366_c,model.reactions.MNXR75760_c])
rxn = model.reactions.MNXR70905_c
print rxn
rxn.add_metabolites({model.metabolites.no_p: -2,
model.metabolites.n2o_p: 1,
model.metabolites.focytcc553_p: -2,
model.metabolites.ficytcc553_p: 2,
model.metabolites.h2o_p: 1,
model.metabolites.h2o_c: 0,
model.metabolites.n2o_c: 0,
model.metabolites.h_c: 0,
model.metabolites.no_c: 0,
model.metabolites.ficytC_c: 0,
model.metabolites.focytC_c: 0,
model.metabolites.h_p: -2},combine=False)
print rxn
rxn.id = 'NOR_im'
rxn.name = 'Nitric oxide reductase (cytochrome c)'
model.reactions.NO3R1_im.check_mass_balance()
###Output
_____no_output_____
###Markdown
NiR (EC 1.7.2.1 or 1.7.2.2), dissimilatory nitrite reductase
###Code
rxn = model.reactions.MNXR70768_c
print rxn
rxn.add_metabolites({model.metabolites.no2_p: -1,
model.metabolites.nh3_p: 1,
model.metabolites.focytcc553_p: -6,
model.metabolites.ficytcc553_p: 6,
model.metabolites.h2o_c: 0,
model.metabolites.nh3_c: 0,
model.metabolites.no2_c: 0,
model.metabolites.h_c: 0,
model.metabolites.no_c: 0,
model.metabolites.h2o_p: 2,
model.metabolites.ficytC_c: 0,
model.metabolites.focytC_c: 0,
model.metabolites.h_p: -8},combine=False)
print rxn
rxn.id = 'NITR_AM_im'
rxn.name = 'Nitrite reductase (cytochrome; ammonia-forming)'
# Gene-Reaction-Rule update based on Metacyc
rxn.notes.update({'GENE ASSOCIATION': ['MCA2059']})
rxn = model.reactions.MNXR84802_c
print rxn
rxn.add_metabolites({model.metabolites.no2_p: -1,
model.metabolites.no_p: 1,
model.metabolites.focytcc553_p: -1,
model.metabolites.ficytcc553_p: 1,
model.metabolites.h2o_p: 1,
model.metabolites.fdxrd_c: 0,
model.metabolites.h2o_c: 0,
model.metabolites.no2_c: 0,
model.metabolites.h_c: 0,
model.metabolites.no_c: 0,
model.metabolites.nh3_c: 0,
model.metabolites.fdxox_c: 0,
model.metabolites.h_p: -2},combine=False)
print rxn
rxn.id = 'NITR_NO_im'
rxn.name = 'Nitrite reductase (cytochrome; NO-forming)'
# Gene-Reaction-Rule update based on Metacyc
rxn.notes.update({'GENE ASSOCIATION': ['MCA2059']})
###Output
MNXR84802_c: 6.0 fdxrd_c + 7.0 h_c + no2_c --> 6.0 fdxox_c + 2.0 h2o_c + nh3_c
MNXR84802_c: focytcc553_p + 2 h_p + no2_p --> ficytcc553_p + h2o_p + no_p
###Markdown
Nitrate Transport System
###Code
rxn = model.reactions.no3_out
rxn.reaction = 'no3_e <->'
rxn.id = 'EX_no3_e'
rxn.name = 'Nitrate exchange'
rxn = Reaction('no3t_om')
rxn.add_metabolites({model.metabolites.no3_e:-1,model.metabolites.no3_p:1})
rxn.id = 'no3t_om'
rxn.name = 'Diffusion of Nitrate between Extracellular and Periplasm'
model.add_reaction(rxn)
for x in model.essential_reactions():
if x.id.startswith('MNX'):
print x
model.reactions.nh3_in.lower_bound = 0
model.reactions.nh3_in.upper_bound = 0
model.reactions.EX_no3_e.lower_bound = -1000
model.reactions.EX_no3_e.upper_bound = 1000
solution = model.solve()
from cameo.util import TimeMachine
with TimeMachine() as tm:
for x in model.reactions:
if x.id.startswith('MNX'):
x.lower_bound = 0
x.upper_bound = 0
solution2 = model.solve()
target_filename_json = relative_directory + '/Reconstructions/MethylococcusModel8.json'
target_filename_xml = relative_directory + '/Reconstructions/MethylococcusModel8.xml'
cobra.io.write_legacy_sbml(model, target_filename_xml, use_fbc_package=False)
cobra.io.save_json_model(model, target_filename_json)
###Output
_____no_output_____ |
assets/uploads/chollet_6_3.ipynb | ###Markdown
Advanced Use of RNNs

> **3 techniques** to improve the performance and generalization power of RNNs:
- *Recurrent dropout*: to combat overfitting
- *Stacking recurrent layers*: to increase the representational power of the network
- *Bidirectional recurrent layers*: present the same information to different networks, increasing accuracy and mitigating problems.

Case: Temperature forecasting

Unlike the previous cases, in which we analyzed text sequences, we will now work with time series, another application of deep learning to sequences.
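As a quick illustration of how these three techniques look in Keras, the sketch below combines them in one model; the layer sizes, dropout rates, and the use of 14 input features (the number of columns in this dataset) are assumptions for the example, not the settings used later in this notebook:

```python
from keras.models import Sequential
from keras import layers

model = Sequential()
# stacked recurrent layers: the first GRU returns full sequences so another recurrent layer can sit on top
model.add(layers.GRU(32,
                     dropout=0.1,             # dropout applied to the layer's inputs
                     recurrent_dropout=0.5,   # recurrent dropout applied to the recurrent state
                     return_sequences=True,
                     input_shape=(None, 14)))
# bidirectional wrapper: the sequence is fed to the layer both forwards and backwards
model.add(layers.Bidirectional(layers.GRU(64, recurrent_dropout=0.5)))
model.add(layers.Dense(1))
model.compile(optimizer='rmsprop', loss='mae')
```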
###Code
from google.colab import drive
drive.mount('/content/drive')
import os
data_dir = '/content/drive/My Drive/Deep_Learning/'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')
# Opening the file
f = open(fname)
data = f.read()
f.close()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
print(header)
print(len(lines))
###Output
['"Date Time"', '"p (mbar)"', '"T (degC)"', '"Tpot (K)"', '"Tdew (degC)"', '"rh (%)"', '"VPmax (mbar)"', '"VPact (mbar)"', '"VPdef (mbar)"', '"sh (g/kg)"', '"H2OC (mmol/mol)"', '"rho (g/m**3)"', '"wv (m/s)"', '"max. wv (m/s)"', '"wd (deg)"']
420551
###Markdown
We need to convert the file into a NumPy array
###Code
import numpy as np
# create a matrix of zeros
float_data = np.zeros((len(lines), len(header) - 1))
# fill it with the data from the dataset
for i, line in enumerate(lines):
values = [float(x) for x in line.split(',')[1:]]
float_data[i, :] = values
###Output
_____no_output_____
###Markdown
Plotting the temperature over time (*timeseries*)
###Code
from matplotlib import pyplot as plt
temp = float_data[:, 1]
plt.plot(range(len(temp)), temp)
###Output
_____no_output_____
###Markdown
Viewing only the first 10 days:
###Code
plt.plot(range(1440), temp[:1440])
###Output
_____no_output_____
###Markdown
- The temperatures show good periodicity when we look at them on an annual scale.
- However, things get much more chaotic when we look at them daily, so it is harder to predict the temperature of a single day than of a whole month.

Preparing the data

> Settings:
1. lookback = 720—Observations will go back 5 days.
2. steps = 6—Observations will be sampled at one data point per hour.
3. delay = 144—Targets will be 24 hours in the future.

We need to:
- normalize the data: $\frac{x-\bar{x}}{\sigma_x}$
- build batches of the data
###Code
# Preparing the data
# use only the first 200000 samples to compute the mean and standard deviation
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
###Output
_____no_output_____
###Markdown
We need to do some *feature engineering* and define the following variables:
- **data** —The original array of floating-point data, which you normalized in listing 6.32.
- **lookback** —How many timesteps back the input data should go.
- **delay** —How many timesteps in the future the target should be.
- **min_index** and **max_index** —Indices in the data array that delimit which timesteps to draw from. This is useful for keeping a segment of the data for validation and another for testing.
- **shuffle** —Whether to shuffle the samples or draw them in chronological order.
- **batch_size** —The number of samples per batch.
- **step** —The period, in timesteps, at which you sample data. You'll set it to 6 in order to draw one data point every hour.
###Code
def generator(data, lookback, delay, min_index, max_index,
shuffle=False, batch_size=128, step=6):
if max_index is None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(
min_index + lookback, max_index, size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i += len(rows)
samples = np.zeros((len(rows),
lookback // step,
data.shape[-1]))
targets = np.zeros((len(rows),))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples, targets
###Output
_____no_output_____
###Markdown
With the *generator* function we select the training, validation, and test samples. The validation and test selections are *out of time*, i.e., drawn from later periods.
###Code
# setting the constants
lookback = 1440
step = 6
delay = 144
batch_size = 128
train_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size)
test_gen = generator(float_data,
lookback=lookback,
delay=delay,
min_index=300001,
max_index=None,
step=step,
batch_size=batch_size)
# Ensuring the validation sample comes right after the training sample
val_steps = (300000 - 200001 - lookback) // batch_size
# Ensuring the test sample comes right after the validation sample
test_steps = (len(float_data) - 300001 - lookback) // batch_size
###Output
_____no_output_____
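###Markdown
As a quick sanity check, we can draw one batch from the training generator and inspect its shapes; each batch is a pair of samples and targets.
###Code
# Draw a single batch from the training generator and check the array shapes
samples, targets = next(train_gen)
print(samples.shape)  # -> (batch_size, lookback // step, number of features)
print(targets.shape)  # -> (batch_size,)
###Output
_____no_output_____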
###Markdown
Baseline and Sanity CheckIt is always important to have a baseline to compare results against and to perform a *sanity check*.For temperature forecasting, we can assume that:- the temperatures are continuous.- the temperature 24h from now will be the same as it is now.To evaluate this baseline, we use the mean absolute error (MAE).
###Code
def evaluate_naive_method():
batch_maes = []
for step in range(val_steps):
samples, targets = next(val_gen)
preds = samples[:, -1, 1]
mae = np.mean(np.abs(preds - targets))
batch_maes.append(mae)
print(np.mean(batch_maes))
evaluate_naive_method()
###Output
0.2897359729905486
###Markdown
> Since we normalized the data, this corresponds to:$0.29 \times \sigma_{Temperature} = 2.57^{\circ}C$We have to build a model whose MAE is lower than that!
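The conversion can be checked directly from the `std` array computed during normalization (column 1 of the data is the temperature):
###Code
# Convert the normalized MAE of the naive baseline back to degrees Celsius
celsius_mae = 0.29 * std[1]
print(celsius_mae)  # roughly 2.57 degrees Celsius
###Output
_____no_output_____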
###Code
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
evaluate_naive_method()
# Visualizing the results
import matplotlib.pyplot as plt
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
- In this training run we used the data sequence as a whole, without considering how each measurement at a time t influences the temperature at t+1.- Just as with the sequence of words in a review, order and causality matter!> We will use a **GRU** (***gated recurrent unit***), which is similar to an LSTM but computationally cheaper. Recurrent baseline - a first attempt
###Code
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen,
validation_steps=val_steps)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Dropout to avoid overfittingUsing dropout is a good option to avoid **overfitting**, since it 'breaks' some correlations in the training data and thus prevents the model from over-adjusting.> Doing this in recurrent networks is not trivial, because recurrent training needs the information from previous timesteps.**Yarin Gal**: the dropout mask must be *the same at every timestep*. A constant dropout mask should also be applied to the internal activations of the recurrent layer, a kind of "***recurrent dropout***".In Keras, every recurrent layer takes two dropout-related arguments:- dropout, a float specifying the dropout rate for the layer's input units- recurrent_dropout, which specifies the dropout rate of the recurrent units.
###Code
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32,
dropout=0.2,
recurrent_dropout=0.2,
input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Stacking Recurrent LayersTo increase the representational power of the network we can stack recurrent layers. In Keras, every intermediate recurrent layer must return its full sequence of outputs (`return_sequences=True`) so that the next recurrent layer receives a 3D input, as in the code below.
###Code
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.GRU(32,
dropout=0.1,
recurrent_dropout=0.5,
return_sequences=True,
input_shape=(None, float_data.shape[-1])))
model.add(layers.GRU(64, activation='relu',
dropout=0.1,
recurrent_dropout=0.5))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Bidirectional LayersA bidirectional RNN processes the input sequence both chronologically and in reversed order with two recurrent layers and merges their representations, often capturing patterns that a single direction misses. As a first experiment, the generator below feeds the model the same data in reversed (antichronological) order so it can be compared with the chronological baseline.
###Code
def reverse_order_generator(data, lookback, delay, min_index, max_index,
shuffle=False, batch_size=128, step=6):
if max_index is None:
max_index = len(data) - delay - 1
i = min_index + lookback
while 1:
if shuffle:
rows = np.random.randint(
min_index + lookback, max_index, size=batch_size)
else:
if i + batch_size >= max_index:
i = min_index + lookback
rows = np.arange(i, min(i + batch_size, max_index))
i += len(rows)
samples = np.zeros((len(rows),
lookback // step,
data.shape[-1]))
targets = np.zeros((len(rows),))
for j, row in enumerate(rows):
indices = range(rows[j] - lookback, rows[j], step)
samples[j] = data[indices]
targets[j] = data[rows[j] + delay][1]
yield samples[:, ::-1, :], targets
train_gen_reverse = reverse_order_generator(
float_data,
lookback=lookback,
delay=delay,
min_index=0,
max_index=200000,
shuffle=True,
step=step,
batch_size=batch_size)
val_gen_reverse = reverse_order_generator(
float_data,
lookback=lookback,
delay=delay,
min_index=200001,
max_index=300000,
step=step,
batch_size=batch_size)
model = Sequential()
model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen_reverse,
steps_per_epoch=500,
epochs=20,
validation_data=val_gen_reverse,
validation_steps=val_steps)
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(loss))
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
The same idea can be tested on the IMDB sentiment task: we train an LSTM on reviews whose word sequences have been reversed and compare it with chronological training.
###Code
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras import layers
from keras.models import Sequential
# Number of words to consider as features
max_features = 10000
# Cut texts after this number of words (among top max_features most common words)
maxlen = 500
# Load data
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# Reverse sequences
x_train = [x[::-1] for x in x_train]
x_test = [x[::-1] for x in x_test]
# Pad sequences
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
model = Sequential()
model.add(layers.Embedding(max_features, 128))
model.add(layers.LSTM(32))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
loss='binary_crossentropy',
metrics=['acc'])
history = model.fit(x_train, y_train,
epochs=10,
batch_size=128,
validation_split=0.2)
###Output
_____no_output_____
###Markdown
Now we train a bidirectional LSTM on the same IMDB data using Keras' `Bidirectional` wrapper.
###Code
from keras import backend as K
K.clear_session()
model = Sequential()
model.add(layers.Embedding(max_features, 32))
model.add(layers.Bidirectional(layers.LSTM(32)))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=128, validation_split=0.2)
###Output
_____no_output_____
###Markdown
Finally, we apply a bidirectional GRU to the temperature-forecasting problem.
###Code
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
model = Sequential()
model.add(layers.Bidirectional(
layers.GRU(32), input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
steps_per_epoch=500,
epochs=40,
validation_data=val_gen,
validation_steps=val_steps)
###Output
_____no_output_____
###Markdown
The training and validation losses of the bidirectional GRU can be plotted and compared with the previous experiments in the same way as before.
###Code
###Output
_____no_output_____ |
src/xml_parser_notebook.ipynb | ###Markdown
Writing sample data to CSV
###Code
df = gia.mk_bill_df('2019-08-01')
df.to_csv(path_or_buf='test.csv', sep='|', na_rep='N//A', date_format='yyyy-mm-dd')
coll = gia.get_collection('2019-08-01')
coll = json.loads(coll.text)['packages']
tags = {}
for bill in coll:
bill_id = bill['packageId']
pac = gia.get_package_data(bill_id)
root = ET.fromstring(pac.text)
tags[bill_id] = [child.tag for child in root]
un_tags = set()
for key in tags:
for tag in tags[key]:
un_tags.add(tag)
###Output
_____no_output_____
###Markdown
Possible child tags for bill package
###Code
un_tags
###Output
_____no_output_____ |
code/preprocessing/.ipynb_checkpoints/preprocessing_v2-checkpoint.ipynb | ###Markdown
Table of Contents1 Package import2 Data Loading3 Preprocessing3.1 parsing3.1.1 typo_parser3.1.2 email_address_parser3.1.3 bytedata_parser3.1.4 structure_parser3.1.5 reference_parser3.2 main structural_email4 Main block5 Saved processed data6 module test Package import
###Code
import pandas as pd
import numpy as np
import os
from glob import glob
from tqdm import tqdm
import matplotlib.pyplot as plt
import mailparser
import re
###Output
_____no_output_____
###Markdown
Data Loading- from file into DataFrame
###Code
def load_data_folder(path):
"""
@param folders: the train or test directory
@return: document list with [doc_path, doc, label, original_idx]
"""
folders = glob(path+"/**") # explore all the folder under the directory
docs = []
for classes in folders:
label = classes.split("\\")[-1]
doc_paths = glob(classes+"\\**")
for doc_path in doc_paths:
original_idx = doc_path.split("\\")[-1]
with open(doc_path, encoding="UTF-8") as f:
text = f.read()
docs.append([doc_path, text, label, original_idx])
print(f"\nLoaded folder under {path}: \n")
for folder in folders:
print(folder)
return docs
corpus_train_docs = load_data_folder(path="../../data/train")
corpus_test_docs = load_data_folder(path="../../data/test")
###Output
Loaded folder under ../../data/train:
../../data/train\alt.atheism
../../data/train\comp.graphics
../../data/train\comp.os.ms-windows.misc
../../data/train\comp.sys.ibm.pc.hardware
../../data/train\comp.sys.mac.hardware
../../data/train\comp.windows.x
../../data/train\misc.forsale
../../data/train\rec.autos
../../data/train\rec.motorcycles
../../data/train\rec.sport.baseball
../../data/train\rec.sport.hockey
../../data/train\sci.crypt
../../data/train\sci.electronics
../../data/train\sci.med
../../data/train\sci.space
../../data/train\soc.religion.christian
../../data/train\talk.politics.guns
../../data/train\talk.politics.mideast
../../data/train\talk.politics.misc
../../data/train\talk.religion.misc
Loaded folder under ../../data/test:
../../data/test\alt.atheism
../../data/test\comp.graphics
../../data/test\comp.os.ms-windows.misc
../../data/test\comp.sys.ibm.pc.hardware
../../data/test\comp.sys.mac.hardware
../../data/test\comp.windows.x
../../data/test\misc.forsale
../../data/test\rec.autos
../../data/test\rec.motorcycles
../../data/test\rec.sport.baseball
../../data/test\rec.sport.hockey
../../data/test\sci.crypt
../../data/test\sci.electronics
../../data/test\sci.med
../../data/test\sci.space
../../data/test\soc.religion.christian
../../data/test\talk.politics.guns
../../data/test\talk.politics.mideast
../../data/test\talk.politics.misc
../../data/test\talk.religion.misc
###Markdown
Preprocessing parsing
###Code
corpus_train = pd.DataFrame(corpus_train_docs, columns=["doc_path", "text", "label", "original_idx"])
corpus_train = corpus_train.reset_index().rename(columns={"index":"global_index"})
corpus_test = pd.DataFrame(corpus_test_docs, columns=["doc_path", "text", "label", "original_idx"])
corpus_test = corpus_test.reset_index().rename(columns={"index":"global_index"})
print("original_idx duplicate count:", corpus_train.shape[0] - corpus_train.original_idx.drop_duplicates().shape[0], " on ", corpus_train.shape[0])
print("original_idx duplicate count:", corpus_test.shape[0] - corpus_test.original_idx.drop_duplicates().shape[0], " on ", corpus_test.shape[0])
###Output
original_idx duplicate count: 1060 on 11083
original_idx duplicate count: 770 on 7761
###Markdown
typo_parser
###Code
def typo_parser(x):
"""
1. replace irrelevant symbol "|" or "*"
2. remove extra space " "
3. replace extra \n "\n\n" into "\n"
4. replace "> *>" into ">>" for further analysis
@param string: email body string
@return: cleaned email body string, extracted emails
# test_string = 'www.\n com\n\n or ?\n>\n >>\n \n > > >|> (note) \n> \n I\nam not good enough with regex>'
# typo_parser(test_string)
"""
# x = re.sub('([,:;?!\.”\)])\n', '\g<1> ', x) # add space for symbol like .\n or ?\n
# x = re.sub('(\w)\n(\w)', '\g<1> \g<2>', x) # add space for symbol like word\nword
x = re.sub('\n', ' \n ', x) # add space for between \n
x = re.sub("[\*|\|\^]", "", x) # replace irrelevant symbol "|" or "*"
x = re.sub(">[ >]*>", ">>", x)# compress > [?] >
x = re.sub("\[.*?\]", "", x, flags=re.S) # separate for typo like [a)
x = re.sub("\(.*?\)", "", x, flags=re.S)
x = re.sub("\n[ \n]*\n", "\n", x) # compress \n
return x
###Output
_____no_output_____
###Markdown
email_address_parser
###Code
def email_address_parser(string):
"""
extract and remove email from the body
@param string: email body string
@return: cleaned email body string, extracted emails
"""
emails = None
emails = re.findall(" ?[\S]+@[\S]+ ?", string)
string = re.sub(" ?[\S]+@[\S]+ ?", " ", string)
return string, emails
###Output
_____no_output_____
###Markdown
bytedata_parser
###Code
def bytedata_parser(string, threshold=50):
"""
    Since 99% of English words are between 1 and 20 characters long, but special symbols can make tokens longer, we set the threshold to 50 so that only byte data (such as embedded photos) is parsed out.
    If the length of a span is larger than the threshold, we will not treat it as a word.
sep can only use space
"""
bytedata = None
clean_string = " ".join([word for word in re.split(" ", string) if len(word)<=threshold])
## sentence length is the same
# clean_string = "\n".join([word for word in re.split("\n", clean_string) if len(word)<=threshold])
bytedata = [word for word in re.split(" ", string) if len(word)>threshold]
return clean_string, bytedata
###Output
_____no_output_____
###Markdown
structure_parser
###Code
def structure_parser(string):
"""
@param parser: email string
@return: structural information for email header, body, others
"""
error_message = None
header = {}
body = ""
others = []
try:
mail = mailparser.parse_from_string(string)
if mail.has_defects: # [first line error]
remove_first_line_string = "\n".join(string.split("\n")[1:])
mail = mailparser.parse_from_string(remove_first_line_string)
# print("remove_first_line_string update for ")
header, body = mail.headers, mail.body
others = [mail.date, mail.delivered_to, mail.to_domains, error_message]
except Exception as error:
error_message = error
return header, body, others
###Output
_____no_output_____
###Markdown
reference_parser
###Code
def extra_parser(x):
"""
remove_flag and extra space
"""
x = re.sub("(?:In article)?.*writes:" , "", x, flags=re.S)
x = re.sub(" {2,}", " ", x) # compress space
return x
def reference_parser(string, match_type=2):
"""
Consider reply with referencing previous email, we need to separate them to make prediction separately.
@param
string: email body string
match_type: 0 with return only main body, 1 with return main body + previous one reference, 2 with more reference
@return:
reply, previous_one, previous_two in the email
@ test with the following code
string = " \n\n\n\n >>>zero email \n\n >>first email\n >second email\n reply email \n"
reply, previous_one, previous_two = reference_parser(string, match_type=2)
print("## reply\n", repr(reply))
print("## previous_one\n", repr(previous_one))
print("## previous_two\n", repr(previous_two))
"""
previous_one, previous_two, reply = '', '', ''
# extract reply with out containing >
reply = " ".join([s for s in string.split("\n") if ">" not in s])
reply = extra_parser(reply)
# add "\n" before string to matchign [^>]{1}
if match_type>0:
previous_one = " ".join(re.findall("[^>]{1}>{1}([^>]{1}[\S ]*)\n", "\n" + string)) # matching >
previous_one = extra_parser(previous_one)
if match_type>1: # flag reference_two
previous_two = " ".join(re.findall("[^>]{1}>{2}([^>]{1}[\S ]*)\n", "\n" + string)) # matching >>
previous_two = extra_parser(previous_two)
# previous_two_more_pt = "[^>]{1}>{2,}([^>]{1}[\S ]*)\n" # matching >> or >>> more
return reply, previous_one, previous_two
###Output
_____no_output_____
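###Markdown
To see what each returned piece looks like, the example from the `reference_parser` docstring above can be run directly:
###Code
# Example string taken from the reference_parser docstring
string = " \n\n\n\n >>>zero email \n\n >>first email\n >second email\n reply email \n"
reply, previous_one, previous_two = reference_parser(string, match_type=2)
print("## reply\n", repr(reply))
print("## previous_one\n", repr(previous_one))
print("## previous_two\n", repr(previous_two))
###Output
_____no_output_____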
###Markdown
main structural_email
###Code
def structural_email(data, bytedata_parser_threshold=50, reference_parser_match_type=2):
"""
This is a parser pipeline, parser order matters.
1. string => structure email to separate => header, body, others
2. body => remove typo and some irrelevant words => body
3. body => parse and remove email from body => body_no_email
4. body_no_email => parse and remove binary data like BMP or picture from body => body_no_binary_no_email
5. body_no_binary_no_email => separate email reference and reply => reply, previous_one, previous_two
@param data: data text series including all the training set or test set
@return: structural information
"""
print("Preprocessing for unstructure email...")
header_info = []
body_info = []
others_info = []
for string in tqdm(data):
header, body, others = structure_parser(string)
body = typo_parser(body)
body_no_email, emails = email_address_parser(body)
body_no_binary_no_email, bytedata = bytedata_parser(body_no_email, threshold=bytedata_parser_threshold)
reply, previous_one, previous_two = reference_parser(body_no_binary_no_email, match_type=reference_parser_match_type)
header_info.append(header)
body_info.append([reply, previous_one, previous_two])
others_info.append(others+[emails]+[bytedata])
a1 = pd.DataFrame.from_dict(header_info)
a2 = pd.DataFrame(body_info, columns=["reply", "reference_one", "reference_two"])
a3 = pd.DataFrame(others_info, columns=["date", "delivered_to", "to_domains", "error_message", "contained_emails", "long_string"])
structure_email = pd.concat([a1, a2, a3], axis=1)
return structure_email
###Output
_____no_output_____
###Markdown
Main block
###Code
structural_train = structural_email(corpus_train["text"])
structural_test = structural_email(corpus_test["text"])
train = pd.concat([corpus_train, structural_train], axis=1)
test = pd.concat([corpus_test, structural_test], axis=1)
all_cols = train.columns.tolist()
print(all_cols)
###Output
['global_index', 'doc_path', 'text', 'label', 'original_idx', 'From', 'Subject', 'Summary', 'Keywords', 'Expires', 'Distribution', 'Organization', 'Supersedes', 'Lines', 'X-Newsreader', 'NNTP-Posting-Host', 'Reply-To', 'Nntp-Posting-Host', 'In-Reply-To', 'News-Software', 'X-Mailer', 'Originator', 'Article-I.D.', 'X-News-Reader', 'X-Sender', 'X-Disclaimer', 'Nntp-Posting-User', 'X-Bytes', 'X-Xxmessage-Id', 'X-Xxdate', 'X-Useragent', 'In-reply-to', 'OD-Comment-To', 'ReplyTo', 'Disclaimer', 'Comments', 'Posting-Front-End', 'X-Reader', 'Mime-Version', 'Content-Type', 'Content-Transfer-Encoding', 'X-UserAgent', 'X-NewsSoftware', 'Nntp-Software', 'Oganization', 'Apparently-To', 'X-Comment-To', 'X-Gateway', 'X-Advert', 'Cc', 'X-News-Software', 'X-Posted-From', 'Follow-Ups', 'X-Auth-User', 'X-FTN-To', 'X-Gated-By', 'X-Standard-Disclaimer', 'Moderator', 'X-XXMessage-ID', 'X-XXDate', 'To', 'Posted-Date', 'Received-Date', 'Orginization', 'X-Md4-Signature', 'Return-Receipt-To', 'X-Mail-Reader', 'Content-Length', 'X-Copyright', 'Original-To', 'X-Received', 'X-To', 'Return-Path', 'Nntp-Posting-Host-[nntpd-23809]', 'Organisation', 'X-Date', 'Nntp-Posting-Host-[nntpd-8755]', 'Nntp-Posting-Host-[nntpd-19510]', 'Nntp-Posting-Host-[nntpd-29970]', 'X-Software', 'X-AltNet-ID', 'MIME-Version', 'Bcc', 'Status', 'Nntp-Posting-Host-[nntpd-681]', 'Weather', 'Moon-Phase', 'X-Last-Updated', 'X-Face', 'X-Maildoor', 'X-Newssoftware', 'Nf-ID', 'Nf-From', 'X-Address', 'X-Fax', 'X-Phone', 'IMPORTANT-INFO', 'X-Added', 'Original-Sender', 'X-Alt.reply-Address', 'X-X-From', 'Mmdf-Warning', 'Followups-to', 'X-Newsposter', 'X-Header', 'X-Cc', 'Oanization', 'reply', 'reference_one', 'reference_two', 'date', 'delivered_to', 'to_domains', 'error_message', 'contained_emails', 'long_string']
###Markdown
Saved processed data
###Code
train.to_json('../../data/structured_train.json')
test.to_json('../../data/structured_test.json')
###Output
_____no_output_____
###Markdown
module test
###Code
def checking_text(idx, write_in_local=True):
x = train[train["global_index"] == idx]
string = x["text"].iloc[0]
body = x["reply"].iloc[0]
x_path = x["doc_path"].iloc[0]
x_label = x["label"].iloc[0]
if write_in_local:
with open("./module_checking_sample.txt", "w", encoding="utf-8") as f:
f.write(x_label+"\n\n")
f.write(x_path+"\n\n")
f.write(string)
return string, body, x_path, x_label
module_test = True
if module_test:
    # could be split out into a separate py file; save this process and write it up in the report
# idx = 22
idx = 9187
string, reply, x_path, x_label = checking_text(idx)
header, body, others = structure_parser(string)
print("\nrepr(header): \n", repr(header))
print("\nrepr(body): \n", repr(body))
print("\nrepr(others): \n", repr(others))
body = typo_parser(body)
print("\nrepr(body): \n", repr(body))
body_no_email, emails = email_address_parser(body)
print("\nrepr(body): \n", repr(body))
print("\nrepr(emails): \n", repr(emails))
print("\nrepr(body_no_email): \n", repr(body_no_email))
body_no_binary_no_email, bytedata = bytedata_parser(body_no_email, threshold=25)
print("\nrepr(bytedata): \n", repr(bytedata))
print("\nrepr(body_no_binary_no_email): \n", repr(body_no_binary_no_email))
reply, previous_one, previous_two = reference_parser(body_no_binary_no_email, match_type=2)
print("\nrepr(reply): \n", repr(reply))
print("\nrepr(previous_one): \n", repr(previous_one))
print("\nrepr(previous_two): \n", repr(previous_two))
with open('regex_sample.txt','r') as f:
sample = f.read()
parsed_f = structural_email(pd.Series(sample))
parsed_f.to_json("regex_sample_parsed.json")
###Output
100%|███████████████████████████████████████████████████████████████████████████████████| 1/1 [00:00<00:00, 333.46it/s] |
CitiBike_SubscribersVsCustomer_Rides.ipynb | ###Markdown
I wanted to find the average trip duration for each age and plot it, but I could not figure out how to do it, so I am planning to plot Subscribers against weekday instead.
###Code
x = (df['Age'])
y = df['Trip Duration']
pl.scatter(x,y)
pl.title('Trip Duration as a function of Age')
pl.xlabel('Age', fontsize = 12)
pl.ylabel('Trip Duration', fontsize = 12)
df.drop(['Trip Duration' , 'Gender', 'Age'], axis=1, inplace=True)
df.head()
df.describe()
fig = pl.figure(figsize(6,6))
#instad of plotting with matplotlib i.e. plot() i use the plot method in pandas
norm_c = 1
((df['date'][df['User Type'] == 0].groupby([df['date'].dt.weekday]).count()) / norm_c).plot(kind="bar",
color='IndianRed',
label='Customers')
norm_s = 1
ax = ((df['date'][df['User Type'] == 1].groupby([df['date'].dt.weekday]).count()) / norm_s).plot(kind="bar",
color='SteelBlue',
alpha=0.5,
label='Subscribers')
tmp = ax.xaxis.set_ticklabels(['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'], fontsize=20)
pl.legend()
pl.title('Rides taken by Users based on days of week')
pl.xlabel('Days of week')
pl.ylabel('Number of rides')
###Output
_____no_output_____
###Markdown
Figure 1a: Distribution of Citibike bikers by Customer types in July 2017, absolute counts
###Code
# Calculating errors
fig=pl.figure(figsize(8,8))
counts_c = df.date[df['User Type'] == 0].groupby([df.date.dt.weekday]).count()
norm_c = 1
error_c = np.sqrt(counts_c)
ax=((counts_c) / norm_c).plot(kind="bar",color='IndianRed', yerr=[
((error_c) / norm_c, (error_c) / norm_c)], label='customers bikers')
counts_s = df.date[df['User Type']==1].groupby([df.date.dt.weekday]).count()
norm_s = 1
error_s=np.sqrt(counts_s)
((counts_s) / norm_s).plot(kind="bar", alpha=0.5, yerr=[((error_s) / norm_s, (error_s) / norm_s)],
color='SteelBlue', label='subscribers bikers')
ax.xaxis.set_ticklabels(['Mon','Tue','Wed','Thu','Fri','Sat','Sun'], fontsize=20)
ax.set_ylabel ("Number of rides")
ax.set_xlabel ("Day of the week")
pl.title('Rides taken by Users based on days of week')
pl.legend(['Customer bikers','Subscriber bikers'],fontsize=12)
###Output
_____no_output_____
###Markdown
Figure 1b: Distribution of Citibike bikers by Customer types in July 2017, absolute counts, with statistical errors
###Code
fig = pl.figure(figsize(8,8))
norm_c = counts_c.sum()
error_c = np.sqrt(counts_c)
((counts_c) / norm_c).plot(kind="bar", color='IndianRed',
yerr=[((error_c) / norm_c, (error_c) / norm_c)],
label='Customer bikers')
norm_s = counts_s.sum()
ax = ((counts_s) / norm_s).plot(kind="bar", alpha=0.5,
yerr=[((error_s) / norm_s, (error_s) / norm_s)],
color='SteelBlue', label='Subscriber bikers')
ax.xaxis.set_ticklabels(['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'], fontsize=20)
ax.set_ylabel ("Fraction of rides")
ax.set_xlabel ("Day of the week")
pl.title('Fraction of rides taken by Customers to Subscribers based on days of week')
pl.legend(['customers bikers','subscribers bikers'],fontsize=10)
###Output
_____no_output_____ |
section_4/01_probability_basic.ipynb | ###Markdown
Basics of ProbabilityProbability views the world in terms of "how likely things are to happen." It is a very useful concept for describing real-world phenomena. ●What is probability?"Probability" is the degree to which some phenomenon is expected to occur. It is expressed by the following formula: $$P(A)=\frac{a}{n}$$In this formula, $P(A)$ is the probability that event $A$ occurs, $a$ is the number of cases in which event A occurs, and $n$ is the total number of cases.Consider a coin toss as an example. When a coin is tossed, the face that ends up on top is either heads or tails, i.e., two cases, and we assume both faces are equally likely. The total number of cases is then 2, and the number of cases for the event $A$ "heads comes up" is 1. Therefore the probability is:$$P(A)=\frac{a}{n}=\frac{1}{2}$$Since this is $\frac{1}{2}$, the event "heads comes up" is expected 50% of the time. Next, consider a die. The probability of the event A "a 3 is rolled" has 1 favorable case out of 6 total cases, so:$$P(A)=\frac{a}{n}=\frac{1}{6}$$which means it is expected about 16.7% of the time.Next, we compute the probability that the sum of two dice equals 4. The event A "the sum is 4" consists of 3 cases: (1, 3), (2, 2), (3, 1). The total number of cases is $6\times 6=36$. The probability is therefore:$$P(A)=\frac{a}{n}=\frac{3}{36}=\frac{1}{12}$$which is about 8.3%, so rolling two dice and getting a sum of 4 can be expected about 8.3% of the time. ●Complementary eventsFor an event $A$, the event "$A$ does not occur" is called the "complement" of $A$, written $\bar{A}$. The probability of the complement $\bar{A}$ can be obtained from the probability $P(A)$ of event $A$ as follows: $$P(\bar{A})=1-P(A)$$In the example above, the probability that the sum of two dice is 4 was $\frac{1}{12}$. Using this, the probability that "the sum of two dice is anything other than 4" is:$$P(\bar{A})=1-\frac{1}{12}=\frac{11}{12}$$So with about 91.7% probability the sum will be something other than 4. Enumerating every case in which the sum is not 4 would be tedious, but with the complement the probability is easy to obtain. ●What are random numbers?When you roll a die, you do not know which of 1-6 will come up until the top face settles. A "random number" is such an undetermined value. The following code uses NumPy's `random.randint( )` to randomly return a value from 1 to 6, like a die. Passing an integer $n$ to the `randint( )` function returns a random integer from $0$ to $n-1$.
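As a quick check of the dice example above, the same probabilities can be obtained by enumerating all 36 outcomes; this small illustrative snippet uses only the standard library.
###Code
from fractions import Fraction
# Enumerate every outcome of two dice and count the cases where the sum is 4
outcomes = [(i, j) for i in range(1, 7) for j in range(1, 7)]
p_a = Fraction(sum(1 for i, j in outcomes if i + j == 4), len(outcomes))
print(p_a)      # 1/12
print(1 - p_a)  # 11/12 (the complement)
###Output
_____no_output_____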
###Code
import numpy as np
r_int = np.random.randint(6) + 1  # random number from 0 to 5, plus 1
print(r_int)  # randomly prints a value from 1 to 6
# for practice
###Output
_____no_output_____
###Markdown
Using NumPy's `random.rand()` function, you can obtain a random decimal between 0 and 1.
###Code
import numpy as np
r_dec = np.random.rand()  # randomly returns a decimal between 0 and 1
print(r_dec)
# for practice
###Output
_____no_output_____
###Markdown
●Uniform random numbersThe `random.rand()` function described above returns decimals between 0 and 1 with equal probability. If you pass an integer `a` as an argument, you get `a` such decimals. The following code plots many uniform random numbers as x and y coordinates in a scatter plot. Running it confirms that the random numbers produced by `random.rand()` are uniform.
###Code
import numpy as np
import matplotlib.pyplot as plt
n = 1000  # number of samples
x = np.random.rand(n)  # uniform random numbers in 0-1
y = np.random.rand(n)  # uniform random numbers in 0-1
plt.scatter(x, y)  # scatter plot
plt.grid()
plt.show()
# for practice
###Output
_____no_output_____
###Markdown
●Biased random numbersNumPy's `random.randn( )` function returns random numbers drawn from the "normal distribution," a distribution that will be explained in a later lecture. In a normal distribution the probability is high near the center and low at both tails. The following code plots many normally distributed random numbers as x and y coordinates in a scatter plot.
###Code
import numpy as np
import matplotlib.pyplot as plt
n = 1000  # number of samples
x = np.random.randn(n)  # random numbers following a normal distribution
y = np.random.randn(n)  # random numbers following a normal distribution
plt.scatter(x, y)  # scatter plot
plt.grid()
plt.show()
# for practice
###Output
_____no_output_____
###Markdown
●Convergence to the probabilityThe ratio (number of occurrences of an event / number of trials) eventually converges to the probability. The following code rolls a die many times, counts how often a 5 comes up, and displays how (count of 5s / number of rolls) evolves. Confirm that, as the trials accumulate, (count of 5s / number of trials) converges to the probability (about 16.7%).
###Code
import numpy as np
import matplotlib.pyplot as plt
x = []
y = []
total = 0  # number of trials
num_5 = 0  # number of times a 5 came up
n = 10000  # number of die rolls
for i in range(n):
    if np.random.randint(6)+1 == 5:  # add 1 to a random number from 0-5 to get 1-6
num_5 += 1
total += 1
x.append(i)
y.append(num_5/total)
plt.plot(x, y)
plt.plot(x, [1/6]*n, linestyle="dashed") # y is a list containing 1/6 repeated n times
plt.grid()
plt.show()
# for practice
###Output
_____no_output_____ |
examples/Mindboggle_Prediction_and_Visualization.ipynb | ###Markdown
Mindboggle DKT Cortical Prediction and VisualizationIn this notebook we demonstrate how to perform inference with pretrained MeshNet and UNet models. **MeshNet** can be up to **1.5x faster** and **>30x smaller** while maintaining comparable performance to UNet. Model Performance| Model | Macro DICE | Inference Speed | Model Size | Classes| -----------| ----------- |----------- |----------- |----------- || MeshNet Large | .6742 | 19 subvolumes/sec | 9mb | 31| UNet | .6771 | 13 subvolumes/sec | 288 mb | 31---Authors: [Kevin Wang](https://github.com/ssktotoro/), [Alex Fedorov](https://github.com/Entodi/), [Sergey Kolesnikov](https://github.com/Scitator)[](https://github.com/catalyst-team/catalyst) Colab setupFirst of all, do not forget to change the runtime type to GPU. To do so click `Runtime` -> `Change runtime type` -> Select `"Python 3"` and `"GPU"` -> click `Save`. After that you can click `Runtime` -> `Run all` and watch the tutorial. Setup Environment
###Code
%%bash
git clone https://github.com/catalyst-team/neuro.git
pip install -r neuro/requirements/requirements.txt
cd neuro/
import torch
import nibabel as nib
from neuro.predictor import Predictor
from neuro.model import MeshNet, UNet
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
DatasetWe'll be using the Mindboggle 101 dataset for a multiclass 3d segmentation task.The dataset can be downloaded off osf with the following command from osfclient after you register with osf.`osf -p 9ahyp clone .`Otherwise you can download it using a Catalyst utility `download-gdrive` which downloads a version from the Catalyst Google Drive`usage: download-gdrive {FILE_ID} {FILENAME}`
###Code
%%bash
mkdir Mindboggle_data
mkdir -p data/Mindboggle_101/
osf -p 9ahyp clone Mindboggle_data/
cp -r Mindboggle_data/osfstorage/Mindboggle101_volumes/ data/Mindboggle_101/
find data/Mindboggle_101 -name '*.tar.gz'| xargs -i tar zxvf {} -C data/Mindboggle_101
find data/Mindboggle_101 -name '*.tar.gz'| xargs -i rm {}
###Output
_____no_output_____
###Markdown
Run the prepare data script that limits the labels to the DKT cortical labels (31 labels). We can of course use more labels.`usage: python ../neuro/scripts/prepare_data.py ../data/Mindboggle_101 {N_labels}`
###Code
%%bash
python neuro/scripts/prepare_data.py data/Mindboggle_101/ 31
###Output
_____no_output_____
###Markdown
Download Models
###Code
%%bash
download-gdrive 11i-gPKoLzEUVqVJ0UNCjG30HFuMXODkG meshnet_mindboggle_large_train.30_full.pth
download-gdrive 1gVjj1gYoPLk8BjZHXsznS6fbbWJaN3Gb unet_mindboggle_train.30_full.pth
###Output
_____no_output_____
###Markdown
Prepare Models for inferenceBecause our models classify subvolumes we adopt a majority voting method that ensures every voxel is classified and focuses on important voxels. First we classify all non-overlapping 38x38x38 subvolumes in a regular grid partitioning the volume space. This ensures a prediction for every voxel. Next we randomly sample overlapping subvolumes from a gaussian distribution in the center of the brain like in training until the required number of subvolumes is reached. For every voxel the class with the majority vote is the prediction. Here we use 512 subvolumes for demonstration though more subvolumes can increase the DICE score.
###Code
volume_shape = [256, 256, 256]
subvolume_shape = [38, 38, 38]
n_subvolumes = 512
n_classes = 31
device_name = "cuda:0" if torch.cuda.is_available() else "cpu"
device = torch.device(device_name)
meshnet_large_model = MeshNet(n_channels=1, n_classes=n_classes, large=True)
meshnet_large_model.load_state_dict(torch.load('meshnet_mindboggle_large_train.30_full.pth', map_location=device)['model_state_dict'])
meshnet_large_model.to(device)
meshnet_large_predictor = Predictor(meshnet_large_model, volume_shape, subvolume_shape, n_subvolumes, n_classes)
unet_model = UNet(n_channels=1, n_classes=n_classes)
unet_model.load_state_dict(torch.load('unet_mindboggle_train.30_full.pth', map_location=device)['model_state_dict'])
unet_model.to(device)
unet_predictor = Predictor(unet_model, volume_shape, subvolume_shape, n_subvolumes, n_classes)
###Output
_____no_output_____
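###Markdown
To make the majority-voting scheme described above concrete, here is a minimal sketch of how votes from overlapping subvolume predictions could be accumulated. It is illustrative only; the `Predictor` class already handles this internally, and the function and argument names below are hypothetical.
###Code
import torch
def majority_vote_sketch(subvolume_logits, subvolume_corners, volume_shape, n_classes):
    """Accumulate per-voxel class votes from overlapping subvolume predictions (illustrative)."""
    votes = torch.zeros((n_classes, *volume_shape), dtype=torch.long)
    for logits, (x, y, z) in zip(subvolume_logits, subvolume_corners):
        pred = logits.argmax(dim=0)          # predicted class per voxel in this subvolume
        dx, dy, dz = pred.shape
        for c in range(n_classes):
            votes[c, x:x + dx, y:y + dy, z:z + dz] += (pred == c).long()
    return votes.argmax(dim=0)               # final label = class with the most votes per voxel
###Output
_____no_output_____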
###Markdown
Segment Mindboggle TestBrain with Timing
###Code
%time meshnet_predicted_segmentation = meshnet_large_predictor.predict('data/Mindboggle_101/NKI-TRT-20_volumes/NKI-TRT-20-5/t1weighted.nii.gz')
%time unet_predicted_segmentation = unet_predictor.predict('data/Mindboggle_101/NKI-TRT-20_volumes/NKI-TRT-20-5/t1weighted.nii.gz')
img = nib.load('data/Mindboggle_101/NKI-TRT-20_volumes/NKI-TRT-20-5/t1weighted.nii.gz')
img = img.get_fdata()
labels = nib.load('data/Mindboggle_101/NKI-TRT-20_volumes/NKI-TRT-20-5/labels.DKT31.manual+aseg_labels.nii.gz')
labels = labels.get_fdata()
###Output
_____no_output_____
###Markdown
Visualize Predictions
###Code
def show_slices(slices, raw=True):
if raw:
cmap = 'jet'
else:
cmap = 'nipy_spectral'
fig, axes = plt.subplots(1, len(slices), figsize=(15,15))
for i, slice in enumerate(slices):
axes[i].imshow(slice, cmap='nipy_spectral')
show_slices(
[img[100, :,:].T[::-1][:, ::-1],
img[:, 100,:].T[::-1],
img[:, :, 100].T[::-1]
])
show_slices(
[labels[120, :,:].T[::-1][:, ::-1],
labels[:, 120,:].T[::-1],
labels[:, :, 120].T[::-1]
], raw=False)
show_slices(
[meshnet_predicted_segmentation[120, :,:].cpu().numpy().T[::-1][:, ::-1],
meshnet_predicted_segmentation[:, 120,:].cpu().numpy().T[::-1],
meshnet_predicted_segmentation[:, :,120].cpu().numpy().T[::-1]
], raw=False)
show_slices(
[unet_predicted_segmentation[120, :,:].cpu().numpy().T[::-1][:, ::-1],
unet_predicted_segmentation[:, 120,:].cpu().numpy().T[::-1],
unet_predicted_segmentation[:, :,120].cpu().numpy().T[::-1]
], raw=False)
###Output
_____no_output_____ |
genomics/jupyter/load_and_refit.ipynb | ###Markdown
Step 2: Refit. In this notebook, we calculate the parameters used for exact CV by refitting the model initially fit in step one, the notebook ``fit_model_and_save``.For expository purposes this notebook calculates the refit for only one weight vector. To compute exact CV, one would perform the corresponding computation for all leave-k-out weight vectors.
###Code
from copy import deepcopy
import inspect
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
import sys
import time
np.random.seed(3452453)
import paragami
from aistats2019_ij_paper import regression_mixture_lib as rm_lib
from aistats2019_ij_paper import saving_gmm_utils
from aistats2019_ij_paper import mse_utils
import plot_utils_lib
# Load the initial fit.
# This file was produced by the notebook ``fit_model_and_save``.
initial_fit_infile = '../fits/initial_fit.npz'
full_fit, gmm, regs, metadata = \
saving_gmm_utils.load_initial_optimum(initial_fit_infile)
timepoints = metadata['timepoints']
###Output
Initializing FitDerivatives.
Using provided t_jac.
Using provided full_hess.
###Markdown
First, choose some timepoints to leave out.
###Code
# Simulate passing arguments in on the command line.
class Args():
def __init__(self):
pass
args = Args()
args.num_times = 1
args.which_comb = 1
args.max_num_timepoints = 7
###Output
_____no_output_____
###Markdown
The number of points left out (that is, $k$) is given by ``num_times``, which is {{args.num_times}}. The largest timepoint we leave out is given by ``max_num_timepoints``, which is {{args.max_num_timepoints}}. Because later timepoints are not affected by the smoothing, there is no reason to leave them out. There are a certain number of ways to leave $k$ out of {{args.max_num_timepoints}} timepoints, and ``which_comb`` chooses one of them in the order given by the function ``itertools.combinations``. Of course, when $k=1$, ``which_comb`` simply chooses which timepoint to leave out. ``mse_utils.get_indexed_combination`` maps ``which_comb`` to particular timepoints in a consistent way.Full exact CV would run this script for all {{args.max_num_timepoints}} choose $k$ values of ``which_comb``.Because we have repeated measurements at each timepoint, leaving out a single timepoint will correspond to leaving out multiple row of the observation matrix. Those rows are determined by ``mse_utils.get_time_weight``, which also returns a weight vector setting these observations' weights to zero.
###Code
lo_inds = mse_utils.get_indexed_combination(
num_times=args.num_times, which_comb=args.which_comb,
max_num_timepoints=args.max_num_timepoints)
new_time_w, full_lo_inds = mse_utils.get_time_weight(lo_inds, timepoints)
print('Left out timepoint: {}'.format(lo_inds))
print('Left out observations: {}'.format(full_lo_inds))
print('Leave-k-out weights: {}'.format(new_time_w))
###Output
Left out timepoint: [1]
Left out observations: [3 4 5]
Leave-k-out weights: [1 1 1 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1]
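###Markdown
For reference, the set of leave-one-out combinations being indexed can be listed explicitly. This is a small illustration and assumes `which_comb` indexes the `itertools.combinations` list with 0-based indexing, which is consistent with the printed output above.
###Code
import itertools
combs = list(itertools.combinations(range(args.max_num_timepoints), args.num_times))
print(len(combs))              # 7 choose 1 = 7 possible leave-one-out refits
print(combs[args.which_comb])  # (1,), matching the left-out timepoint above
###Output
_____no_output_____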
###Markdown
We now re-optimize with the new weights.Note that we could either start the optimization at the initial optimum (a "warm start") or do a fresh start from k-means. A fresh start is more time consuming but a more stringent test for the accuracy of the IJ. We calculate both, but report results from the fresh start in the paper. In the notebook ``examine_and_save_results``, you can choose to examine either set of results.Here, for consistency with the paper, we re-initialize with k-means.
###Code
regs.time_w = deepcopy(new_time_w)
reg_params_w = regs.get_optimal_regression_params()
gmm.set_regression_params(reg_params_w)
init_gmm_params = \
rm_lib.kmeans_init(gmm.transformed_reg_params,
gmm.num_components, 50)
init_x = gmm.gmm_params_pattern.flatten(init_gmm_params, free=True)
opt_time = time.time()
gmm_opt, init_x2 = gmm.optimize(init_x, gtol=1e-2)
print('\tUpdating preconditioner...')
kl_hess = gmm.update_preconditioner(init_x2)
print('\tRunning preconditioned optimization...')
gmm.conditioned_obj.reset()
reopt, gmm_params_free_w = gmm.optimize_fully(init_x2, verbose=True)
print(gmm_opt.message)
opt_time = time.time() - opt_time
print('Refit time: {} seconds'.format(opt_time))
###Output
Iter 0: f = -153.38003431
Iter 1: f = -152.49438715
Iter 2: f = -153.69147895
Iter 3: f = -153.83779915
Iter 4: f = -154.02397812
Iter 5: f = -153.41393391
Iter 6: f = -154.10396420
Iter 7: f = -154.14366282
Iter 8: f = -154.14261201
Iter 9: f = -154.16417745
Iter 10: f = -154.18307547
Iter 11: f = -154.20711481
Iter 12: f = -154.22118064
Iter 13: f = -154.27402715
Iter 14: f = -154.28739474
Iter 15: f = -154.33849929
Iter 16: f = -154.03580241
Iter 17: f = -154.35421130
Iter 18: f = -154.36910489
Iter 19: f = -154.36872458
Iter 20: f = -154.37238982
Iter 21: f = -154.37722095
Iter 22: f = -154.38186985
Iter 23: f = -154.38410992
Updating preconditioner...
Running preconditioned optimization...
Preconditioned iteration 1
Running preconditioned optimization.
Iter 0: f = -154.38410992
Iter 1: f = -154.38423176
Iter 2: f = -154.38584092
Iter 3: f = -154.21889674
Iter 4: f = -154.42200228
Iter 5: f = -154.39603234
Iter 6: f = -154.39957947
Iter 7: f = -154.41374585
Iter 8: f = -154.43397491
Iter 9: f = -154.43484046
Iter 10: f = -154.43484816
Iter 11: f = -154.43484816
Preconditioned iteration 2
Getting Hessian and preconditioner.
Running preconditioned optimization.
Iter 12: f = -154.43484816
Iter 13: f = -154.43484816
Converged.
Optimization terminated successfully.
Refit time: 24.85831880569458 seconds
###Markdown
We now save the results.
###Code
gmm_params_w = \
full_fit.comb_params_pattern['mix'].fold(
gmm_params_free_w, free=True)
refit_comb_params = {
'mix': gmm_params_w,
'reg': reg_params_w }
refit_comb_params_free = \
full_fit.comb_params_pattern.flatten(refit_comb_params, free=True)
save_filename = \
'../fits/refit__num_times{}__which_comb{}.npz'.format(
args.num_times, args.which_comb)
print('Saving to {}'.format(save_filename))
saving_gmm_utils.save_refit(
outfile=save_filename,
comb_params_free=refit_comb_params_free,
comb_params_pattern=full_fit.comb_params_pattern,
initial_fit_infile=initial_fit_infile,
time_w=new_time_w,
lo_inds=lo_inds,
full_lo_inds=full_lo_inds)
###Output
Saving to ../fits/refit__num_times1__which_comb1.npz
|
Cluster/Emotion.ipynb | ###Markdown
Clustering facial expression data
###Code
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from PIL import Image
###Output
_____no_output_____
###Markdown
Utility functionsSome handy functions carried over from the Iris clustering notebook
###Code
def merge(dfs, title, xlabel="K-value", ylabel="adjusted rand score"):
df = pd.concat(dfs, axis=1)
plt.figure(figsize=(10,5))
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
sns.lineplot(data = df)
def plot_line(title:str, y:list, x:list, line_name:str, xlabel="K-value", ylabel="adjusted rand score"):
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
df = pd.DataFrame({line_name: y}, index=x)
sns.lineplot(data = df)
return df
def plot_count(title:str, x:list, line_name:str, xlabel="K-value", ylabel="adjusted rand score"):
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
df = pd.DataFrame({xlabel: x})
sns.countplot(x=xlabel, data = df)
return df
###Output
_____no_output_____
###Markdown
Data loadingRead the csv file and build a Dataset
###Code
data_label = pd.read_csv("data/expression-recognition/data_label.csv")
labels = data_label.label
data_label.head()
###Output
_____no_output_____
###Markdown
Inspect dataset statistics
###Code
# count the number of samples per class
for i in sorted(labels.unique()):
print('class {} : {}'.format(i, len(labels[labels==i])))
_ = plot_count("number state", labels, "", 'class', 'number')
datapath = "./data/expression-recognition/data"
###Output
_____no_output_____
###Markdown
PreprocessingFlatten each image into a vector
###Code
def load_data(data_label, datapath):
n_rows = len(data_label)
# n_rows = 5
serieses = []
for i in range(n_rows):
filepath = os.path.join(datapath, data_label.iloc[i].pic_name)
image_array = np.array(Image.open(filepath))
image_array = image_array.reshape((1, -1)).squeeze()
serieses.append(pd.Series(image_array))
df = pd.DataFrame(serieses)
return df
df = load_data(data_label, datapath)
df
labels = data_label.label
images = df
from sklearn.cluster import KMeans, DBSCAN, Birch, AffinityPropagation
from sklearn.metrics import adjusted_rand_score
from sklearn.decomposition import PCA
sns.set_style('darkgrid')
###Output
_____no_output_____
###Markdown
Dimensionality reduction
###Code
candidate_components = range(10, 300, 30)
explained_ratios = []
for c in candidate_components:
pca = PCA(n_components=c)
X_pca = pca.fit_transform(images)
explained_ratios.append(np.sum(pca.explained_variance_ratio_))
plt.figure(figsize=(10, 6), dpi=144)
plt.grid()
plt.plot(candidate_components, explained_ratios)
plt.xlabel('Number of PCA Components')
plt.ylabel('Explained Variance Ratio')
plt.title('Explained variance ratio for PCA')
plt.yticks(np.arange(0.5, 1.05, .05))
plt.xticks(np.arange(0, 300, 20))
###Output
_____no_output_____
###Markdown
Checking values around 300 to decide how many components to keep
###Code
candidate_components = range(250, 330, 10)
explained_ratios = []
for c in candidate_components:
pca = PCA(n_components=c)
X_pca = pca.fit_transform(images)
print("{} components, explained ratio {}".format(c,np.sum(pca.explained_variance_ratio_)))
###Output
250 components, explained ratio 0.9492242576293239
260 components, explained ratio 0.9510127521826616
270 components, explained ratio 0.9526553088223257
280 components, explained ratio 0.9542400003145649
290 components, explained ratio 0.9557354106960472
300 components, explained ratio 0.9571381806661176
310 components, explained ratio 0.9584894142257558
320 components, explained ratio 0.9597866220265104
###Markdown
It looks like 290 components is about right; 95.5% explained variance is enough
###Code
data = PCA(n_components=290).fit_transform(images)
###Output
_____no_output_____
###Markdown
K-Means
###Code
def k_means_clustering(data, labels, title="", start=2, end=10):
scores = []
ks = []
for i in range(start, end+1):
ks.append(i)
pre = KMeans(n_clusters=i).fit_predict(data)
score = adjusted_rand_score(labels, pre)
print("adjusted rand score is {:.4f} while k = {}".format(score, i))
scores.append(score)
df = plot_line(title, scores, ks, "KMeans " + title)
return df
_ = k_means_clustering(data, labels, "290 components k-means")
###Output
adjusted rand score is 0.0185 while k = 2
adjusted rand score is 0.0181 while k = 3
adjusted rand score is 0.0159 while k = 4
adjusted rand score is 0.0150 while k = 5
adjusted rand score is 0.0140 while k = 6
adjusted rand score is 0.0133 while k = 7
adjusted rand score is 0.0114 while k = 8
adjusted rand score is 0.0108 while k = 9
adjusted rand score is 0.0111 while k = 10
###Markdown
Dimensionality reduction with an autoencoderPCA works very poorly here, so we use an autoencoder for dimensionality reduction instead
###Code
import torch
import torchvision
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from torchvision.utils import save_image
from PIL import Image
import numpy as np
import pandas as pd
import os
torch.cuda.is_available()
###Output
_____no_output_____
###Markdown
Path parameters
###Code
FILEPATH = "./data/expression-recognition"
IMAGE_ROOT = os.path.join(FILEPATH, "data")
LABEL_PATH = os.path.join(FILEPATH, "data_label.csv")
MODEL_PATH = "./models/"
os.makedirs(MODEL_PATH, exist_ok=True)  # make sure the checkpoint directory exists before saving models
###Output
_____no_output_____
###Markdown
Data preparation
###Code
label_df = pd.read_csv(LABEL_PATH)
label_df
class EmotionDataset(Dataset):
def __init__(self, df:pd.DataFrame, imageroot:str, transforms=None):
super().__init__()
self.df = df
self.imageroot = imageroot
self.transforms = transforms
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
filename = os.path.join(self.imageroot, self.df.iloc[idx].pic_name)
image = Image.open(filename)
# label = self.df.iloc[idx].label
if self.transforms:
image = self.transforms(image)
return image
orig_dataset = EmotionDataset(label_df, IMAGE_ROOT, transforms.ToTensor())
stan_dataset = EmotionDataset(label_df, IMAGE_ROOT, transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))
orig_dataset[0].size()
###Output
_____no_output_____
###Markdown
Hyperparameter settings
###Code
BATCH_SIZE = 512
EPOCHS = 20
LEARNING_RATE = 0.001
###Output
_____no_output_____
###Markdown
Model definition
###Code
class AutoEncoder(nn.Module):
def __init__(self):
super().__init__()
self.encoder = nn.Sequential(
# input : 1, 48, 48
nn.Conv2d(1, 4, 2, stride=2),
nn.ReLU(True),
# 4, 24, 24
nn.Conv2d(4, 16, 4, stride=2),
nn.ReLU(True),
            # 16, 11, 11
nn.MaxPool2d(2, stride=2),
# 16, 5, 5
nn.Conv2d(16, 8, 3, stride=2, padding=1),
nn.ReLU(True),
# 8, 3, 3
nn.MaxPool2d(2, 1)
# 8, 2, 2
)
self.decoder = nn.Sequential(
# input: 8, 2, 2
nn.ConvTranspose2d(8, 16, 3, stride=2),
# nn.ConvTranspose2d(8, 16, 3, stride=2, padding=1),
nn.ReLU(True),
# 16, 5, 5
nn.ConvTranspose2d(16, 16, 2, stride=2),
nn.ReLU(True),
# 16, 10, 10
nn.ConvTranspose2d(16, 4, 6, stride=2),
nn.ReLU(True),
# 4, 24, 24
nn.ConvTranspose2d(4, 1, 2, stride=2),
nn.Tanh()
# 1, 48, 48
)
def forward(self, x):
x = self.encoder(x)
x = self.decoder(x)
return x
###Output
_____no_output_____
###Markdown
Instantiate the model and dataloader
###Code
stan_loader = DataLoader(stan_dataset, batch_size=BATCH_SIZE, shuffle=True)
model = AutoEncoder().cuda()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE, weight_decay=1e-5)
###Output
_____no_output_____
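###Markdown
Before training, a quick shape check confirms the encoder compresses each 48x48 image down to an 8x2x2 code (32 features) and that the decoder reconstructs the original size. This is a small sanity-check snippet and assumes a CUDA device is available, as in the rest of the notebook.
###Code
with torch.no_grad():
    dummy = torch.randn(2, 1, 48, 48).cuda()  # a fake batch of two grayscale 48x48 images
    code = model.encoder(dummy)
    recon = model(dummy)
print(code.shape)   # expected: torch.Size([2, 8, 2, 2])
print(recon.shape)  # expected: torch.Size([2, 1, 48, 48])
###Output
_____no_output_____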
###Markdown
Training
###Code
model.train()
for epoch in range(40, 40+EPOCHS):
losses = []
for batch_id, data in enumerate(stan_loader):
data = data.cuda()
# =================forward===================
output = model(data)
loss = criterion(output, data)
# =================backward==================
optimizer.zero_grad()
loss.backward()
optimizer.step()
# =================log=======================
losses.append(loss.item())
avg_loss = np.mean(losses)
print('epoch [{}/{}], loss:{:.4f}'.format(epoch+1, EPOCHS, avg_loss))
saved_model = os.path.join(MODEL_PATH, "epoch{}_loss{:.4f}.pkl".format(epoch+1, avg_loss))
torch.save(model.state_dict(), saved_model)
model.eval()
arr = None
for batch_id, data in enumerate(stan_loader):
data = data.cuda()
out = model.encoder(data)
out = out.cpu().detach().numpy()
n_samples = out.shape[0]
out = out.reshape(n_samples, -1)
if batch_id == 0:
arr = out
else:
arr = np.concatenate((arr, out), axis=0)
arr.shape
test_batch = next(iter(stan_loader))  # fetch one batch of images for a quick encoder demo
small_demo = test_batch[0:5].cuda()
model.eval()
demo_output = model.encoder(small_demo)
demo_array = demo_output.cpu().detach().numpy()
reshaped = demo_array.reshape(5, -1)
k_means_clustering(arr, label_df.label, "test")
reshaped.shape
###Output
_____no_output_____ |
mass_scraper/MassScraper.ipynb | ###Markdown
Kat's Scraper Notebook Code Fellows 401d8 Python MidtermScrapes Indeed for salary information for a given keyword in a given city. Keywords are a list and multiple arguments are acceptable, but note that adding additional keywords drastically increases the time it takes for the scrape to run, since it is currently searching OR, not AND.
###Code
import requests
import bs4
from bs4 import BeautifulSoup
import urllib3
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Define city and keywordsAccepts one city and multiple keywords in a list. Multi-word keywords must be separated with a plus sign.
###Code
city = 'Seattle'
keywords = ['UX+designer']
###Output
_____no_output_____
###Markdown
The scraper itself, using BeautifulSoupCreates a dataframe from the results of the scrape. Also handles cleaning up of some data in the salary field, since Indeed salary fields come in a variety of formats.
###Code
url_template = 'https://www.indeed.com/jobs?q={}&l={}&fromage=any&limit=100'
max_results = 100
df = pd.DataFrame(columns=['ux'])
requests.packages.urllib3.disable_warnings()
for keyword in keywords:
for start in range(0, max_results):
url = url_template.format(keyword, city)
http = urllib3.PoolManager()
response = http.request('GET', url)
soups = BeautifulSoup(response.data.decode('utf-8'), 'html.parser')
for b in soups.find_all('div', attrs={'class': ' row result'}):
try:
salary = b.find('span', attrs={'class': 'no-wrap'}).text
except AttributeError:
salary = 'NA'
df = df.append({'ux': salary}, ignore_index=True)
df.ux.replace(regex=True,inplace=True,to_replace='\n',value='')
df.ux.replace(regex=True,inplace=True,to_replace='$',value='')
df.ux.replace(regex=True,inplace=True,to_replace=' a year',value='')
df.ux.replace(regex=True,inplace=True,to_replace='(Indeed est.)',value='')
###Output
_____no_output_____
###Markdown
CleanupThe next three cells clean up some data for us. We eliminate rows where there is no salary, remove 'a year', comma separation, and dollar signs. We also eliminate any rows that contain 'a day,' 'an hour,' or 'a month,' since we only want to work with annual salaries.
###Code
df = df.query('ux != "NA"')
df = df[df.ux.str.contains('a day') == False]
df = df[df.ux.str.contains('an hour') == False]
df = df[df.ux.str.contains('a month') == False]
df.ux = df.ux.str.replace('a year', '').str.replace(',', '').str.replace('$', '')
###Output
_____no_output_____
###Markdown
Here we just take a peek at the data to confirm the above reformatting is working correctly.
###Code
df.head()
###Output
_____no_output_____
###Markdown
Taking the lowest in the range. Since most of the salaries are listed as a range, we assume the worst-case scenario by splitting the salary on the dash and assigning the first index as a float to a list.
###Code
cleaned_salaries = []
for i in df.ux:
a = i.split('-')
cleaned_salaries.append(float(a[0]))
###Output
_____no_output_____
###Markdown
Reassigning salaries. This replaces the salary column in the dataframe with the values from the list we made above.
###Code
df.ux = cleaned_salaries
# df
###Output
_____no_output_____
###Markdown
Write to CSV. We write our results to a CSV because this scrape is kind of large and it takes foreverrrrr. We want to do things with this data, but we don't want to have to run the scrapes repeatedly.
###Code
df.to_csv('uxresults.csv', encoding='utf-8', index=False)
###Output
_____no_output_____
###Markdown
Let's chart some salaries! First, read them into dataframes from the CSVs we made.
###Code
c_plus = pd.read_csv('cplusresults.csv')
python = pd.read_csv('pythonresults.csv')
javascript = pd.read_csv('javascriptresults.csv')
java = pd.read_csv('javaresults.csv')
php = pd.read_csv('phpresults.csv')
csharp = pd.read_csv('csharpresults.csv')
datascience = pd.read_csv('datascienceresults.csv')
softwaredev = pd.read_csv('softwaredevresults.csv')
webdev = pd.read_csv('webdevresults.csv')
dba = pd.read_csv('DBAresults.csv')
ux = pd.read_csv('uxresults.csv')
###Output
_____no_output_____
###Markdown
Concatenating them into relevant dataframes
###Code
languages = pd.concat([c_plus, python, javascript, java, php, csharp], axis=1)
languages.head()
jobs = pd.concat([datascience, softwaredev, webdev, dba, ux])
###Output
_____no_output_____
###Markdown
Get median values for each
###Code
median_languages = languages.median()
median_jobs = jobs.median()
###Output
_____no_output_____
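###Markdown
Here is a quick optional sketch of a bar chart of the medians computed above. This cell is an added illustration; it assumes the medians were built from the CSVs loaded in the cells above.
###Code
# Added illustration: bar charts of the median salaries computed above
fig, axes = plt.subplots(1, 2, figsize=(12, 4))
median_languages.plot(kind='bar', ax=axes[0], title='Median salary by language')
median_jobs.plot(kind='bar', ax=axes[1], title='Median salary by job title')
plt.tight_layout()
###Output
_____no_output_____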
###Markdown
Let's just plot the distributions of some languages
###Code
python = pd.read_csv('pythonresults.csv')
plt.hist(python.python, bins=20)
plt.ylabel('Python')
javascript = pd.read_csv('javascriptresults.csv')
plt.hist(javascript.javascript, bins=20)
plt.ylabel('Javascript')
c_plus = pd.read_csv('cplusresults.csv')
plt.hist(c_plus.Cplus, bins=20)
plt.ylabel('C++')
###Output
_____no_output_____ |
Python-Standard-Library/Algorithm/Operator.ipynb | ###Markdown
1 Logical Operator
###Code
from operator import *
a = -1
b = 5
print('not_(a)', not_(a))
print('truth(a)', truth(a))
print('is_(a, b)', is_(a,b))
print('is_not(a, b)', is_not(a,b))
from operator import *
a = 1
b = 5.0
print('a =', a)
print('b =', b)
for func in (lt, le, eq, ne, ge, gt):
print('{}(a, b): {}'.format(func.__name__, func(a, b)))
###Output
a = 1
b = 5.0
lt(a, b): True
le(a, b): True
eq(a, b): False
ne(a, b): True
ge(a, b): False
gt(a, b): False
###Markdown
2 Arithmetic Operators
###Code
from operator import *
a = -1
b = 5.0
c = 2
d = 6
print('a =', a)
print('b =', b)
print('c =', c)
print('d =', d)
print('\nPositive/Negative:')
print('abs(a):', abs(a))
print('neg(a):', neg(a))
print('neg(b):', neg(b))
print('pos(a):', pos(a))
print('pos(b):', pos(b))
print('\nArithmetic:')
print('add(a, b) :', add(a, b))
print('floordiv(a, b):', floordiv(a, b))
print('floordiv(d, c):', floordiv(d, c))
print('mod(a, b) :', mod(a, b))
print('mul(a, b) :', mul(a, b))
print('pow(c, d) :', pow(c, d))
print('sub(b, a) :', sub(b, a))
print('truediv(a, b) :', truediv(a, b))
print('truediv(d, c) :', truediv(d, c))
print('\nBitwise:')
print('and_(c, d) :', and_(c, d))
print('invert(c) :', invert(c))
print('lshift(c, d):', lshift(c, d))
print('or_(c, d) :', or_(c, d))
print('rshift(d, c):', rshift(d, c))
print('xor(c, d) :', xor(c, d))
###Output
a = -1
b = 5.0
c = 2
d = 6
Positive/Negative:
abs(a): 1
neg(a): 1
neg(b): -5.0
pos(a): -1
pos(b): 5.0
Arithmetic:
add(a, b) : 4.0
floordiv(a, b): -1.0
floordiv(d, c): 3
mod(a, b) : 4.0
mul(a, b) : -5.0
pow(c, d) : 64
sub(b, a) : 6.0
truediv(a, b) : -0.2
truediv(d, c) : 3.0
Bitwise:
and_(c, d) : 2
invert(c) : -3
lshift(c, d): 128
or_(c, d) : 6
rshift(d, c): 1
xor(c, d) : 4
###Markdown
3 Sequence Operators
###Code
from operator import *
a = [1, 2, 3]
b = ['a', 'b', 'c']
print('a =', a)
print('b =', b)
print('\nConstructive:')
print(' concat(a, b):', concat(a, b))
print('\nSearching:')
print(' contains(a, 1) :', contains(a, 1))
print(' contains(b, "d"):', contains(b, "d"))
print(' countOf(a, 1) :', countOf(a, 1))
print(' countOf(b, "d") :', countOf(b, "d"))
print(' indexOf(a, 1) :', indexOf(a, 1))
print('\nAccess Items:')
print(' getitem(b, 1) :',
getitem(b, 1))
print(' getitem(b, slice(1, 3)) :',
getitem(b, slice(1, 3)))
print(' setitem(b, 1, "d") :', end=' ')
setitem(b, 1, "d")
print(b)
print(' setitem(a, slice(1, 3), [4, 5]):', end=' ')
setitem(a, slice(1, 3), [4, 5])
print(a)
print('\nDestructive:')
print(' delitem(b, 1) :', end=' ')
delitem(b, 1)
print(b)
print(' delitem(a, slice(1, 3)):', end=' ')
delitem(a, slice(1, 3))
print(a)
###Output
a = [1, 2, 3]
b = ['a', 'b', 'c']
Constructive:
concat(a, b): [1, 2, 3, 'a', 'b', 'c']
Searching:
contains(a, 1) : True
contains(b, "d"): False
countOf(a, 1) : 1
countOf(b, "d") : 0
indexOf(a, 1) : 0
Access Items:
getitem(b, 1) : b
getitem(b, slice(1, 3)) : ['b', 'c']
setitem(b, 1, "d") : ['a', 'd', 'c']
setitem(a, slice(1, 3), [4, 5]): [1, 4, 5]
Destructive:
delitem(b, 1) : ['a', 'c']
delitem(a, slice(1, 3)): [1]
###Markdown
4 Combining Operators and Custom Classes
###Code
from operator import *
class MyObj:
"""Example for operator overloading"""
def __init__(self, val):
super(MyObj, self).__init__()
self.val = val
def __str__(self):
return 'MyObj({})'.format(self.val)
def __lt__(self, other):
"""compare for less-than"""
print('Testing {} < {}'.format(self, other))
return self.val < other.val
def __add__(self, other):
"""add values"""
print('Adding {} + {}'.format(self, other))
return MyObj(self.val + other.val)
a = MyObj(1)
b = MyObj(2)
print('Comparison:')
print(a<b)
print('\nArithmetic:')
print(a+b)
###Output
Comparison:
Testing MyObj(1) < MyObj(2)
True
Arithmetic:
Adding MyObj(1) + MyObj(2)
MyObj(3)
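###Markdown
Because the module-level functions simply dispatch to these special methods, the same comparison and addition also work when called through the `operator` functions. This short check is an added illustration, not part of the original example.
###Code
# Same operations, but going through the operator module functions
print(lt(a, b))
print(add(a, b))
###Output
_____no_output_____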
|
ex_notebook_2.ipynb | ###Markdown
Setup: Modules
Be sure to run this code block first. Imports all of the modules necessary for the rest of the notebook. Note that the ALS module also has scipy as a dependency, even though we do not need to import it within this notebook. This example notebook was developed and tested using the following packages/versions. Other versions may also work.
- python (3.8.8)
- numpy (1.20.1)
- pandas (1.2.4)
- scipy (1.6.2)
- matplotlib (3.3.4)
- ipython (7.22.0)
- ipympl (0.7.0)
- ALS (1.2.0)
- StaticCell (1.0.0)
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from IPython.display import display
import ALS
import StaticCell as SC
###Output
_____no_output_____
###Markdown
Setup: User Model Step 1: Define the user model. This is the step that requires the most coding on behalf of the user. I prefer to code the user model in a separate text editor (Sublime Text), save it as a module, and then import it. The 'ex_model_1.py' file contains the example model_H2O2_depletion with comments providing an explanation how to structure the user model. It is recommended to follow the template provided there, although any function that has the correct arguments / returns (see below) will work.**Important:** See 'create_model_code.py' for an optional utility function that converts models written in an A+B==>C format (ex: Kintecus) to code representing a system of differential rate equations. The code generated by this utility function can be used when creating the user model function to reduce typing errors / save time. See the comments at the top of 'create_model_code.py' for additional details / documentation.*function* **user_model**(*t, model_params*)**Parameters:**>**t : *ndarray***>>Time axis (ms) over which to integrate the model. You may assume that the points are evenly spaced in ascending order.>**model_params : *dict***>>Keys (strings) are the names of the parameters used by the model; values (floats) are the parameter values. Any parameters that will be fit or included in a monte carlo simulation of systematic error should be included.>>Only one parameter is required: 'X0' is the key and the initial radical concentration immediately after photolysis is its value.**Returns:** It is only required to return species for which there is observable data to fit; returning other species from the model is optional, but could be useful when plotting model output. The keys of *m* and *c* must be the same.>**m : *dict***>>Keys (strings) are the names of species returned by the model; values (floats) are their masses (amu).>**c : *dict***>>Keys (strings) are the names of species returned by the model; values (ndarray) are concentrations (molc/cm3) corresponding to times in *t*.
###Code
from ex_model_2 import model_H2O2_depletion
###Output
_____no_output_____
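###Markdown
The actual model used in this notebook lives in 'ex_model_2.py' and is not reproduced here. Purely as an illustration of the documented interface (t in ms, a model_params dict that must contain 'X0', and (m, c) dicts returned), here is a minimal sketch of what a user model could look like. The species name 'X', its mass, and the 'k_wall' parameter are made-up placeholders, not values from the real model.
###Code
# Illustrative sketch only -- not the model imported above.
import numpy as np
from scipy.integrate import odeint

def user_model_sketch(t, model_params):
    # 'X0' is required by the interface; 'k_wall' is a hypothetical parameter.
    X0 = model_params['X0']          # initial radical concentration (molc/cm3)
    k_wall = model_params['k_wall']  # first-order wall loss (1/s), placeholder

    def dcdt(c, t_ms):
        # t is in ms and k_wall is in 1/s, hence the factor of 1e-3
        return [-k_wall * c[0] * 1e-3]

    c_integrated = odeint(dcdt, [X0], t)

    m = {'X': 17.0}                  # mass (amu) of the returned species (placeholder)
    c = {'X': c_integrated[:, 0]}    # concentration vs. time (molc/cm3)
    return m, c
###Output
_____no_output_____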
###Markdown
Step 2: Instantiate the StaticCell object.*class* **StaticCell**(*user_model*)**Parameters:**>**user_model : *function***>>The user model function defined in Step 1.
###Code
model_StaticCell = SC.StaticCell(model_H2O2_depletion)
###Output
_____no_output_____
###Markdown
Setup: Model Step 1: Calculations for fixed parameters. This is a workspace for any fixed model parameters that require calculation. Feel free to modify as necessary; there is no recommended format as this will be model-specific. Variables computed here are used in later steps. **Important:** If converting from a Kintecus model, code for the rate constants can be automatically generated using the 'create_model_code.py' utility function. See the comments at the top of 'create_model_code.py' for additional details / documentation. The code generated by this utility function can be used when creating the user model function to reduce typing errors / save time.
###Code
# Fix initial H2O2 concentration and H2O2 scale factor - ignore uncertainties for now
c_H2O2_0 = 5.25e14
c_H2O2_0_err = 0
T = 298 # K
P = 30 # Torr
M = (P*133.3224)/(1.38e-23*T)/1e6 # molc/cm3
# Helper function to compute bimolecular rate constants
def calc_k_bi(A, E_R):
k_T = A*np.exp(-E_R/T)
return k_T
# Helper function to compute rate constant uncertainty factor (JPL format)
def calc_f(f_298, g):
f_T = f_298*np.exp(np.abs(g*(1/T - 1/298.0)))
return f_T
# Calculate bimolecular rate constants and their uncertainties (cm3/molc/s)
# OH + H2O2 --> HO2 + H2O
# JPL 2015, temperature independent over 200 - 300 K
k1 = 1.8e-12
k1_err = k1*(calc_f(1.15, 45) - 1)
# OH + HO2 --> H2O + O2
# JPL 2015, T = 252-420 K
k2 = calc_k_bi(4.8e-11, -250)
k2_err = k2*(calc_f(1.15, 50) - 1)
# HO2 + HO2 --> H2O2 + O2
# JPL 2015, T = 222-1120 K, M is air
k3a = calc_k_bi(3e-13, -460)
k3a_err = k3a*(calc_f(1.15, 100) - 1)
k3b = calc_k_bi(2.1e-33*M, -920)
k3b_err = k3b*(calc_f(1.2, 200) - 1)
k3 = k3a + k3b
k3_err = (k3a_err**2 + k3b_err**2)**0.5
###Output
_____no_output_____
###Markdown
Step 2: Organize model parameters into a DataFrame.
The *df_model_params* DataFrame will be passed to the StaticCell methods. Rows correspond to each model parameter; the indices are parameter names and must match each parameter in the user model. The columns are detailed below. It is recommended to use the code template in the cell below.
**Important:** If converting from a Kintecus model, code for the rate constants can be automatically generated using the 'create_model_code.py' utility function. See the comments at the top of that file for documentation / an example. The example corresponds to the running example in this notebook. Using this utility function may reduce typing errors / save time.
**Columns:**
>**val : *float***
>>Value of the parameter.
>**err : *float***
>>Absolute uncertainty in the parameter (1 standard error). Used to vary the value of the parameter during monte carlo simulations of systematic model uncertainty. Ignored if set to 0 or if *fit* is True. **Currently ignored by StaticCell.**
>**fit : *bool***
>>If True, then this parameter will be optimized during a fit. If False, then the parameter will be fixed during a fit. **Currently ignored by StaticCell.**
###Code
df_model_params = {}
df_model_params['k_OH_wall'] = {'val':15, 'err':0, 'fit':True }
df_model_params['k_HO2_wall'] = {'val':4, 'err':0, 'fit':True }
df_model_params['k1'] = {'val':k1, 'err':k1_err, 'fit':False}
df_model_params['k2'] = {'val':k2, 'err':k2_err, 'fit':False}
df_model_params['k3'] = {'val':k3, 'err':k3_err, 'fit':False}
df_model_params = pd.DataFrame.from_dict(df_model_params, orient='index')
print('Inputted Model Params DataFrame:')
display(df_model_params)
###Output
Inputted Model Params DataFrame:
###Markdown
Step 3: Set Initial Concentrations. This is where you set your pre-photolysis concentrations. *initial_concentrations* is a Python dictionary containing the pre-photolysis concentrations for all species in the model (including products).
###Code
initial_concentrations = {
'H2O2': c_H2O2_0,
'OH': 0,
'HO2': 0,
'H2O': 0,
'O2' : 0,
}
###Output
_____no_output_____
###Markdown
Step 4: Photolysis Parameters
Set up your photolysis parameters in the next block.
**df_photolysis_params : *DataFrame***
>Each row is a species to be photolyzed, with the following columns:
>**xsn : *float***
>>The cross section of the species, in cm^2.
>**products : *list of strings***
>>List of products formed. Must correspond in order with *qyields*.
>**qyields : *list of floats***
>>List of quantum yields. Must correspond in order with *products*.
###Code
fluence = 3.2e16  # in photons cm^-2
df_photolysis_params = {}  ## Cross sections below are in cm^2
df_photolysis_params['H2O2'] = {'xsn': 8.3e-20, 'products': ['OH'], 'qyields': [2.0]}
df_photolysis_params = pd.DataFrame.from_dict(df_photolysis_params, orient='index')
print('Inputted Photolysis Params DataFrame:')
display(df_photolysis_params)
###Output
Inputted Photolysis Params DataFrame:
###Markdown
Method: Plot the model
Integrates the user model with the parameters specified in *df_model_params* (no fitting). The output is plotted (and optionally saved) in concentration units. All species returned by the user model are plotted, regardless of the *fit* field in *df_data*. See the file 'ex_init_model_2.csv' for an example of how the output is formatted if *save_fn* is specified.
*method* **StaticCell.StaticCell.plot_model**(*t_prephoto, t_react, tbin, df_model_params, initial_concentrations, df_photolysis_params, fluence, photolysis_cycles, delta_xtick=20.0, save_fn=None*)
**Parameters:**
>**t_prephoto : *float***
>>Amount of time to plot pre-photolysis. Must be an integer multiple of *tbin* \* 0.02 ms and cannot be less than -20 ms.
>**t_react : *float***
>>How long the reaction goes before the next photolysis cycle. Must be an integer multiple of *tbin* \* 0.02 ms and must be greater than *t_prephoto*.
>**tbin : *int***
>>The time axis step size will be *tbin* \* 0.02 ms.
>**df_model_params : *DataFrame***
>>Contains the model parameters. See setup above for formatting.
>**initial_concentrations : *dictionary***
>>Contains the pre-photolysis concentrations of all species, including products and intermediates.
>**df_photolysis_params : *DataFrame***
>>Contains the photolysis parameters. See setup above for formatting.
>**fluence : *float***
>>Laser fluence in photons cm^-2.
>**photolysis_cycles : *int***
>>Number of photolysis pulses.
>**delta_xtick : *float, optional***
>>Tick marks and labels for the time axis include zero and are spaced by *delta_xtick* (ms).
>**save_fn : *str, optional***
>>The points in the plots are saved to *save_fn* if this parameter is specified. First column is the time axis (ms) and the remaining columns contain the concentrations (molc/cm3) for each species.
###Code
%matplotlib widget
model_StaticCell.plot_model(-20, 60, 10, df_model_params, initial_concentrations, df_photolysis_params, fluence=3.2e16, photolysis_cycles=3, delta_xtick=40.0, save_fn='ex_init_model_2.csv')
###Output
_____no_output_____ |
examples/model_to_get_started/model_to_get_started.ipynb | ###Markdown
Model to get started
* File name: model_to_get_started.ipynb
* Last edited: 2020-06-24
* Created by: Stefan Bruche (TU Berlin)

```python
import aristopy as ar

# Create basic energy system instance
es = ar.EnergySystem(
    number_of_time_steps=3, hours_per_time_step=1,
    interest_rate=0.05, economic_lifetime=20)

# Add a gas source, two different conversion units and sinks
gas_source = ar.Source(
    ensys=es, name='gas_source', commodity_cost=20, outlet=ar.Flow('Fuel'))

gas_boiler = ar.Conversion(
    ensys=es, name='gas_boiler', basic_variable='Heat',
    inlet=ar.Flow('Fuel', 'gas_source'), outlet=ar.Flow('Heat', 'heat_sink'),
    capacity_max=150, capex_per_capacity=60e3,
    user_expressions='Heat == 0.9 * Fuel')

chp_unit = ar.Conversion(
    ensys=es, name='chp_unit', basic_variable='Elec',
    inlet=ar.Flow('Fuel', 'gas_source'),
    outlet=[ar.Flow('Heat', 'heat_sink'), ar.Flow('Elec', 'elec_sink')],
    capacity_max=100, capex_per_capacity=600e3,
    user_expressions=['Heat == 0.5 * Fuel', 'Elec == 0.4 * Fuel'])

heat_sink = ar.Sink(
    ensys=es, name='heat_sink', inlet=ar.Flow('Heat'),
    commodity_rate_fix=ar.Series('heat_demand', [100, 200, 150]))

elec_sink = ar.Sink(
    ensys=es, name='elec_sink', inlet=ar.Flow('Elec'), commodity_revenues=30)

# Run the optimization
es.optimize(solver='cbc', results_file='results.json')

# Plot some results
plotter = ar.Plotter('results.json')
plotter.plot_operation('heat_sink', 'Heat', lgd_pos='lower center',
                       bar_lw=0.5, ylabel='Thermal energy [MWh]')
plotter.plot_objective(lgd_pos='lower center')
```

Create *aristopy* model
First, we need to import the *aristopy* package. If the import fails, you might need to recheck the installation instructions.
###Code
# Import the required packages (jupyter magic only required for jupyter notebooks)
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import aristopy as ar
###Output
_____no_output_____
###Markdown
An *aristopy* model consists of an instance of the EnergySystem class and the added components. To create an energy system, we need to specify the number of considered time steps and the number of hours per time step. Additionally, the interest rate and the economic lifetime of the installed components are required to calculate the net present value (objective function value).
###Code
# Create basic energy system instance
es = ar.EnergySystem(number_of_time_steps=3, hours_per_time_step=1,
interest_rate=0.05, economic_lifetime=20)
###Output
_____no_output_____
###Markdown
To instantiate a Component instance (Source, Sink, Conversion, Bus, Storage), we need to specify the EnergySystem instance to which it is added, and set a name for the component. Next, we add flows on the inlets and outlets. A Flow instance represents a connection point of a component and is used to create links with other components. Additionally, the flow introduces a commodity to the component and triggers the creation of an associated commodity variable (usually with the same name). The number of required or accepted inlet and outlet flows and component commodities depends on the component type (see table below). You can add multiple flows on an inlet or outlet for setting different commodities or linking components, by arranging them in a list.

| Component type | Nbr. of inlet flows | Nbr. of outlet flows | Nbr. of commodities |
| :--- | :---: | :---: | :---: |
| Source | 0 | $\ge$ 1 | 1 |
| Sink | $\ge$ 1 | 0 | 1 |
| Conversion | $\ge$ 1 | $\ge$ 1 | $\ge$ 1 |
| Storage | $\ge$ 1 | $\ge$ 1 | 1 |
| Bus | $\ge$ 1 | $\ge$ 1 | 1 |
###Code
# Add a gas source
gas_source = ar.Source(ensys=es, name='gas_source', outlet=ar.Flow('Fuel'),
commodity_cost=20)
###Output
_____no_output_____
###Markdown
The conversion instances usually have different commodities on their inlets and outlets. That's why we need to specify the name of the basic variable for conversion components. This basic variable is used to restrict capacities, set operation rates, and calculate CAPEX and OPEX.
###Code
# Add a gas boiler conversion unit
gas_boiler = ar.Conversion(ensys=es, name='gas_boiler',
basic_variable='Heat',
inlet=ar.Flow(commodity='Fuel', link='gas_source'),
outlet=ar.Flow('Heat', 'heat_sink'),
capacity_max=150, capex_per_capacity=60e3,
user_expressions='Heat == 0.9 * Fuel')
###Output
_____no_output_____
###Markdown
We can use the keyword argument **user_expressions** to specify commodity conversion rates, limit capacities, and set other internal component constraints manually. Here we can use the names (identifiers) of the commodity variables created by adding flows, and, if applicable, variables with standard names, e.g.:
* CAP - component capacity variable
* BI_EX - binary existence variable
* BI_OP - binary operation variable
* ... (see file utils.py in your aristopy directory)
The expressions are simply added as a list of strings. The options for mathematical operators are: ``sum, sin, cos, exp, log, ==, >=, <=``. The indexes (sets) of the variables and parameters are processed automatically behind the scenes.
###Code
# Add a CHP unit
chp_unit = ar.Conversion(ensys=es, name='chp_unit', basic_variable='Elec',
inlet=ar.Flow('Fuel', 'gas_source'),
outlet=[ar.Flow('Heat', 'heat_sink'), ar.Flow('Elec', 'elec_sink')],
capacity_max=100, capex_per_capacity=600e3,
user_expressions=['Heat == 0.5 * Fuel',
'Elec == 0.4 * Fuel'])
###Output
_____no_output_____
###Markdown
Time series data can be introduced as an aristopy Series instance and might be applied to set commodity rates, and time-dependent commodity cost or revenues, or generally for the scripting of user expressions.
###Code
# Add a sink with fixed heat demand
heat_sink = ar.Sink(ensys=es, name='heat_sink', inlet=ar.Flow('Heat'),
commodity_rate_fix=ar.Series('heat_demand', [100, 200, 150]))
elec_sink = ar.Sink(ensys=es, name='elec_sink', inlet=ar.Flow('Elec'),
commodity_revenues=30)
###Output
_____no_output_____
###Markdown
**Note:** Alternatively, we could use the *time_series_data* and *user_expressions* keyword arguments to set the required fixed commodity rate of the heat sink.
```python
heat_sink = ar.Sink(ensys=es, name='heat_sink', inlet=ar.Flow('Heat'),
                    time_series_data=ar.Series('heat_demand', [100, 200, 150]),
                    user_expressions='Heat == heat_demand')
```
Run optimization
To run the optimization, we need to call the EnergySystem method *optimize*. The most important input to this method is the name of the applied solver. You have to ensure the solver is available on your machine and can be detected with this name. The solver output is suppressed for convenience in this notebook (*tee=False*). The results of the model run are written to a JSON-file with a specified name.
###Code
es.optimize(solver='cbc', tee=False, results_file='results.json')
###Output
_____no_output_____
###Markdown
Basic information about the building and solving process of the optimization model is stored in the Python dictionary *run_info* of the EnergySystem instance.
###Code
es.run_info
###Output
_____no_output_____
###Markdown
The pyomo ConcreteModel instance of the energy system can be accessed with the attribute *model*. All of the conventional pyomo functions can be applied here (e.g., pprint of the objective function).
###Code
es.model.Obj.pprint()
###Output
Obj : Size=1, Index=None, Active=True
Key : Active : Sense : Expression
None : True : maximize : -249.24420685079974*(gas_source.Fuel[0,0] + gas_source.Fuel[0,1] + gas_source.Fuel[0,2])/0.00034246575342465754 - 60000.0*gas_boiler.CAP - 600000.0*chp_unit.CAP + 373.8663102761996*(elec_sink.Elec[0,0] + elec_sink.Elec[0,1] + elec_sink.Elec[0,2])/0.00034246575342465754
###Markdown
The component variables and constraints are stored in separate pyomo Block models. They can be accessed via attribute block directly on the components. All components are also added to EnergySystem's dictionary components and can be reached with their specified name.
###Code
gas_boiler.block.Heat.pprint()
# return dictionary of variable 'Elec' for component 'chp_unit'
es.components['chp_unit'].block.Elec.get_values()
###Output
_____no_output_____
###Markdown
Plot results. The Plotter class is used to read the exported optimization results from the JSON-file and to provide basic plotting routines. Additional keyword arguments are available to customize the plotting output, e.g., set labels, figure size, legend position, etc. (see dictionary *props* of the Plotter class).
###Code
# Create instance of Plotter class and read in file 'results.json'
plotter = ar.Plotter('results.json')
###Output
_____no_output_____
###Markdown
The method *plot_operation* returns a mixed bar and line plot that visualizes the operation of a component on the basis of a selected commodity.
###Code
plotter.plot_operation('heat_sink', 'Heat', lgd_pos='lower center',
bar_lw=0.5, ylabel='Thermal energy [MWh]',
show_plot=True)
###Output
_____no_output_____
###Markdown
The method *plot_objective* returns a bar chart that summarizes the cost contributions of each component to the overall objective function value.
###Code
plotter.plot_objective(lgd_pos='lower center', show_plot=True)
###Output
_____no_output_____ |
1.1 Charts - Timeseries.ipynb | ###Markdown
Bokeh Tutorial
1.1 Charts - Timeseries
**Exercise: Visualize the evolution of the temperature anomaly monthly average over time with a timeseries chart**
- Data: 'data/Land_Ocean_Monthly_Anomaly_Average.csv'
Tips:
    import pandas as pd
    pd.read_csv()
    pd.to_datetime()
###Code
import pandas as pd
from bokeh.charts import TimeSeries, output_notebook, show
# Get data
# Process data
# Output option
# Create timeseries chart
# Show chart
###Output
_____no_output_____
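###Markdown
One possible solution sketch, added for reference and not part of the original exercise: the column names 'datetime' and 'anomaly' are assumptions about the CSV layout and may need to be adjusted to the actual file; the legacy `bokeh.charts` API imported above is used.
###Code
# Sketch of a possible solution -- column names are assumed, adjust as needed
df = pd.read_csv('data/Land_Ocean_Monthly_Anomaly_Average.csv')
df['datetime'] = pd.to_datetime(df['datetime'])
output_notebook()
ts = TimeSeries(df, x='datetime', y='anomaly',
                title='Temperature anomaly monthly average',
                ylabel='Anomaly')
show(ts)
###Output
_____no_output_____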
###Markdown
**Exercise: Style your plot**
Ideas:
- Add a title
- Add axis labels
- Change width and height
- Deactivate toolbox or customize available tools
- Change line color
Charts arguments can be found: http://bokeh.pydata.org/en/latest/docs/user_guide/charts.html#generic-arguments
###Code
# Style your timeseries chart
# Show new chart
###Output
_____no_output_____
###Markdown
**Exercise: Add the moving annual average to your chart** Tips: pd.rolling_mean()
###Code
# Compute moving average
# Create chart with moving average
# Show chart with moving average
###Output
_____no_output_____ |
documentation/source/usersGuide/usersGuide_26_iterators.ipynb | ###Markdown
User's Guide, Chapter 26: Stream Iteration and Filtering. We learned enough about streams in :ref:`Chapter 6 ` to be able to get started, but you've persevered and hopefully are ready to learn more about how to get the most out of getting through a score. So this chapter will delve deeper into the concept of iteration, that is, going through an object one step at a time, and filtering out elements so only those in classes or areas you want are found. Let's review and describe the concept of iteration in Python (or most programming languages) for a second.
###Code
letterList = ['a', 'b', 'c']
###Output
_____no_output_____
###Markdown
Now you could get your ABCs out of it in this way:
###Code
alphabet = ''
alphabet += letterList[0]
alphabet += letterList[1]
alphabet += letterList[2]
alphabet
###Output
_____no_output_____
###Markdown
But it's far easier, especially for a big list, to _iterate_ over it using a `for` loop:
###Code
alphabet = ''
for letter in letterList:
alphabet += letter
alphabet
###Output
_____no_output_____
###Markdown
We can _iterate_ over a list because lists are _iterable_ (or, conversely, for the tautology department, because we can _iterate_ over a list, we call it _iterable_) and there are some functions and methods that do great things on iterable objects, such as join them:
###Code
''.join(letterList)
###Output
_____no_output_____
###Markdown
Or give the minimum value from a numeric list:
###Code
min([10, 20, 30, -3423, 40])
###Output
_____no_output_____
###Markdown
Or give the length of an iterable:
###Code
len(letterList)
###Output
_____no_output_____
###Markdown
In Python, there's a special type of _iterable_ object called a _generator_ which gives out objects as they are needed. One generator that we have seen already is the `range()` function:
###Code
zeroToFifty = range(51)
zeroToFifty
###Output
_____no_output_____
###Markdown
We can find the first number in that range that is divisible by 5:
###Code
for n in zeroToFifty:
print(n)
if n != 0 and n % 5 == 0:
break
###Output
0
1
2
3
4
5
###Markdown
At this point we've stopped going through the `range` object and no more numbers are ever made or stored in memory -- this point doesn't matter to much for a set of numbers up to 50, but for numbers up to millions, or, as we will see, a repertory of scores of hundreds of thousands of notes, saving a few seconds here and there really adds up.Streams, as we have seen, are iterable:
###Code
s = stream.Part(id='restyStream')
s.append(note.Note('C#'))
s.append(note.Rest(quarterLength=2.0))
s.append(note.Note('D', quarterLength=1.5))
s.append(note.Rest(quarterLength=1.0))
for thing in s:
print(thing, thing.quarterLength)
###Output
<music21.note.Note C#> 1.0
<music21.note.Rest half> 2.0
<music21.note.Note D> 1.5
<music21.note.Rest quarter> 1.0
###Markdown
When you iterate over a Stream, it is actually creating a lightweight object called a `StreamIterator` to help make things easier. We can create one directly by calling `.iter()` on any stream:
###Code
sIter = s.iter()
sIter
###Output
_____no_output_____
###Markdown
.. note:: Prior to v.7, a `StreamIterator` could be created by accessing the property `.iter`. Although `.iter()` is now the recommended form, both usages will be supported until v.9.
###Markdown
This information tells us that `sIter` is an iterator going over the `Part` object with id `restyStream` and it is currently ready to give out the first object, number 0. We can get the next thing in the Stream by calling `next()` on the Stream.
###Code
next(sIter)
next(sIter)
sIter
###Output
_____no_output_____
###Markdown
But for the most part, you'll want to use the built in way of going through an iterable, that is, with a `for` loop:
###Code
for el in sIter:
    print(el, el.quarterLength)
###Output
_____no_output_____
###Markdown
Filtering elements in iteration
So this does exactly what iterating directly on the Stream does -- but it's good to know that a `StreamIterator` is silently being generated so that you can see what else these Iterators do. Most importantly, a `StreamIterator` can add filters to it. Let's add a `ClassFilter` from the :ref:`moduleStreamFilters` module:
###Code
restFilter = stream.filters.ClassFilter('Rest')
restIterator = sIter.addFilter(restFilter)
for el in restIterator:
    print(el, el.quarterLength)
###Output
_____no_output_____
###Markdown
Now when we go through sIter, we are only getting those objects that match all of the filters on it. We can also filter by offset. Let's create a new iterator and add an :class:`~music21.stream.filters.OffsetFilter` to it.
###Code
sIter2 = s.iter()
offsetFilter = stream.filters.OffsetFilter(offsetStart=0.5, offsetEnd=4.0)
offsetIterator = sIter2.addFilter(offsetFilter)
for el in offsetIterator:
    print(el, el.offset)
###Output
_____no_output_____
###Markdown
.. note::
    prior to Music21 v.6, `sIter.addFilter()` would modify `sIter` in place and not return a new iterator.
    Thus in v.5.7, you would have written the last three lines of the code as:
    >>> sIter2.addFilter(offsetFilter)
    >>> for el in sIter2:
    ...     print(el, el.offset)
    The changed behavior in v.6 did not affect most users, but it was one of the
    biggest backward incompatible changes -- it was worth breaking code to finally
    get this right.
###Markdown
Multiple filters can be chained together to get something more powerful:
###Code
for el in s.iter().addFilter(restFilter).addFilter(offsetFilter):
print(el, el.offset)
###Output
<music21.note.Rest half> 1.0
###Markdown
Other filters that `music21` has in the :ref:`moduleStreamFilters` include:
* :class:`~music21.stream.filters.IsFilter` which returns elements that are exactly the same as the objects passed in (useful for getting the context of an object in a stream)
* :class:`~music21.stream.filters.IsNotFilter`, even more useful, for getting everything but an object or list of objects
* :class:`~music21.stream.filters.IdFilter` for finding items by Id.
* :class:`~music21.stream.filters.ClassNotFilter` for finding items other than a list of classes.
* and :class:`~music21.stream.filters.GroupFilter` for finding elements which have a particular group name.
Filter Shortcuts
Filtering elements by offset or by class is so common, that `music21` has some shortcuts for adding filters to it, like this:
###Code
sIter4 = s.iter()
restIterator = sIter4.getElementsByClass('Rest')
restOffsetIterator = restIterator.getElementsByOffset(0.5, 4.0)
for el in restOffsetIterator:
print(el, el.offset)
###Output
<music21.note.Rest half> 1.0
###Markdown
Easier still, since each of these methods returns a new filter object, you can chain them right in the for loop:
###Code
for el in s.iter().getElementsByClass('Rest').getElementsByOffset(0.5, 4.0):
print(el, el.offset)
###Output
<music21.note.Rest half> 1.0
###Markdown
And you can even skip the `s.iter()` step for getting an iterator for the most common of these filters, and `music21` will recognize what you want to do and create the iterator for you:
###Code
for el in s.getElementsByClass('Rest').getElementsByOffset(0.5, 4.0):
print(el, el.offset)
###Output
<music21.note.Rest half> 1.0
###Markdown
The shortcut methods that `music21` exposes on Iterators include:
* :meth:`~music21.stream.iterator.StreamIterator.getElementById` which adds an `IdFilter`
* :meth:`~music21.stream.iterator.StreamIterator.getElementsByClass` which adds a `ClassFilter`
* :meth:`~music21.stream.iterator.StreamIterator.getElementsByGroup` which adds a `GroupFilter`
* :meth:`~music21.stream.iterator.StreamIterator.getElementsByOffset` which adds an `OffsetFilter`
And there are also properties (that is, written without parentheses) which add certain filters:
* :attr:`~music21.stream.iterator.StreamIterator.notes` which filters out everything but `Note` and `Chord` objects
* :attr:`~music21.stream.iterator.StreamIterator.notesAndRests` which filters out everything except `GeneralNote` objects
* :attr:`~music21.stream.iterator.StreamIterator.parts` which returns all the `Part` objects
* :attr:`~music21.stream.iterator.StreamIterator.voices` which returns all the `Voice` objects
* :attr:`~music21.stream.iterator.StreamIterator.spanners` which returns all the `Spanner` objects
Custom Filters
Creating your own filter is pretty easy too. The easiest way is to create a function that takes in an element and returns True or False depending on whether the object matches the filter. We will create a filter to see if the element has a `.pitch` attribute and then if that pitch attribute has a sharp on it:
###Code
def sharpFilter(el):
if (hasattr(el, 'pitch')
and el.pitch.accidental is not None
and el.pitch.accidental.alter > 0):
return True
else:
return False
sharpIterator = s.iter().addFilter(sharpFilter)
for el in sharpIterator:
print(el)
###Output
<music21.note.Note C#>
###Markdown
Recursive and Offset Iterators. `Music21` comes with two other iterators that let you do powerful operations. The most commonly used is the :class:`~music21.stream.iterator.RecursiveIterator` which burrows down into nested Streams to get whatever you want. Let's load in a nested stream:
###Code
bach = corpus.parse('bwv66.6')
for thing in bach:
print(thing)
###Output
<music21.metadata.Metadata object at 0x7fe5169300d0>
<music21.stream.Part Soprano>
<music21.stream.Part Alto>
<music21.stream.Part Tenor>
<music21.stream.Part Bass>
<music21.layout.StaffGroup <music21.stream.Part Soprano><music21.stream.Part Alto><music21.stream.Part Tenor><music21.stream.Part Bass>>
###Markdown
Right, we remember that often the actual notes of a piece can be hidden inside Parts, Measures, and Voices. A recursive iterator gets to them, and they're created by calling `recurse()` on a stream.
###Code
recurseIter = bach.recurse()
recurseIter
###Output
_____no_output_____
###Markdown
Let's add a filter for only E#s to it, and look into it. Instead of checking to see if each element has a `.name` attribute, we'll put a `try...except` clause around it, and if it does not have the `.name` attribute (and thus raises an `AttributeError`), we will return False.
###Code
def eSharpFilter(el):
try:
if el.name == 'E#':
return True
else:
return False
except AttributeError:
return False
eSharpIterator = recurseIter.addFilter(eSharpFilter)
for el in eSharpIterator:
print(el, el.measureNumber)
###Output
<music21.note.Note E#> 9
<music21.note.Note E#> 3
<music21.note.Note E#> 7
<music21.note.Note E#> 7
<music21.note.Note E#> 2
<music21.note.Note E#> 6
###Markdown
Note that the measure numbers don't keep increasing. That's because the recurse iterator finishes one part before returning to the next. We can use the fancy `.getContextByClass` to figure out what part it is in:
###Code
for el in eSharpIterator:
pId = el.getContextByClass(stream.Part).id
print(el, el.measureNumber, pId)
###Output
<music21.note.Note E#> 9 Soprano
<music21.note.Note E#> 3 Alto
<music21.note.Note E#> 7 Alto
<music21.note.Note E#> 7 Tenor
<music21.note.Note E#> 2 Bass
<music21.note.Note E#> 6 Bass
###Markdown
(as an aside, `.measureNumber` is just a shortcut for `.getContextByClass(stream.Measure).number`, so we are actually looking up two contexts) If you want to recurse into a stream and get elements of a certain class, you can do `s.recurse().getElementsByClass(chord.Chord)` but there's another simpler way of doing it: `s[chord.Chord]` (with square brackets). As this example shows:
###Code
chopin = corpus.parse('chopin/mazurka06-2')
for ch in chopin.measures(1, 5)[chord.Chord]:
print(ch)
# note that each of these is a chord in one voice in
# one hand of the piano. To see how to get chords between
# both hands, see the chordify() chapter.
###Output
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
<music21.chord.Chord G#2 D#3>
###Markdown
(when Chopin likes a chord, he **really** likes a chord!) Another great iterator is the OffsetIterator, which returns lists of elements grouped by offset. Let's add some more things to our Stream before we see how it works.
###Code
s.insert(0, clef.TrebleClef())
s.insert(0, key.KeySignature(3))
s.insert(1, instrument.Trumpet())
# normal iterator
for el in s:
print(el, el.offset)
###Output
<music21.clef.TrebleClef> 0.0
<music21.key.KeySignature of 3 sharps> 0.0
<music21.note.Note C#> 0.0
Trumpet 1.0
<music21.note.Rest half> 1.0
<music21.note.Note D> 3.0
<music21.note.Rest quarter> 4.5
###Markdown
Unlike with the normal `StreamIterator` or the `RecursiveIterator`, there is no method on `Stream` to create an offset iterator, so we will create one directly:
###Code
oIter = stream.iterator.OffsetIterator(s)
for elementGroup in oIter:
print(elementGroup[0].offset, elementGroup)
###Output
0.0 [<music21.clef.TrebleClef>, <music21.key.KeySignature of 3 sharps>, <music21.note.Note C#>]
1.0 [<music21.instrument.Trumpet 'Trumpet'>, <music21.note.Rest half>]
3.0 [<music21.note.Note D>]
4.5 [<music21.note.Rest quarter>]
###Markdown
From Iterator to Stream. From either a `StreamIterator` or a `RecursiveIterator` a new `Stream` object can be generated by calling `.stream()` on it. On a `RecursiveIterator`, this does not put the elements into substreams.
###Code
onlyESharps = bach.recurse().addFilter(eSharpFilter)
esharpStream = onlyESharps.stream()
esharpStream.show('text')
esharpStream.derivation
###Output
_____no_output_____
###Markdown
This can be useful if you'd like to do plots on the resulting stream, though this one is a bit too obvious...
###Code
esharpStream.plot('pitchclass')
###Output
_____no_output_____
###Markdown
But maybe this one could tell someone something:
###Code
esharpStream.plot('pianoroll')
###Output
_____no_output_____ |
Customer_Support/bin/Phase_2_Build_ML_models_with_Python_3x6_h2o_ai.ipynb | ###Markdown
Phase 2 - Machine Learning with H2O.ai
Build 4 new ML models using the H2O.ai framework:
- GLM - Generalized Linear Model
- Random Forest
- GBM
- XGBoost
Recap - Info about model evaluation: accuracy metric vs recall
- The global metric accuracy will be used to evaluate the models between all frameworks (xgb, lgbm, sklearn, h2o.ai and Apache Spark)
The last notebook (build ML models using Python) will provide some additional techniques, such as:
- Unbalanced classification and class weight
- SMOTE technique for oversampling the training dataset
- Standard Scale vs. default data, and
- Finally, exchange the global metric accuracy and use the recall metric
The recall metric is a better metric than accuracy to evaluate this type of scenario (customer churn).
Additional info: http://docs.h2o.ai/h2o/latest-stable/h2o-docs/performance-and-prediction.html
Starting process...
###Code
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
## Metrics - Classification
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
## H2O
import h2o
## ML Models
from h2o.estimators import (H2OGeneralizedLinearEstimator, H2OGradientBoostingEstimator,
H2ORandomForestEstimator, H2OXGBoostEstimator)
###Output
_____no_output_____
###Markdown
H2O - connection to h2o cluster
###Code
## connect to h2o cluster and remove all object
h2o.connect(ip='192.168.56.102')
h2o.remove_all()
###Output
Connecting to H2O server at http://192.168.56.102:54321 ... successful.
###Markdown
Load and prepare the dataset to build ML models
###Code
## Load dataset
df = pd.read_csv('../data/WA_Fn-UseC_-Telco-Customer-Churn.csv')
## Filter columns and set values
df.loc[(df.tenure==0) & (df.TotalCharges == ' '), ['TotalCharges', 'tenure']] = 0
df['TotalCharges'] = df['TotalCharges'].astype('float')
target = 'Churn'
current_features = ['tenure', 'MonthlyCharges', 'TotalCharges', 'gender', 'PaymentMethod' , 'Churn', 'Contract']
df = df[current_features]
df.head(3)
###Output
_____no_output_____
###Markdown
Load dataset into H2O cluster
###Code
target = 'Churn'
features = df.columns.to_list()
features.remove(target)
X = df[features]
y = df[target]
SEED = 42
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=SEED)
## Label Encode - will be used later to evaluate model performance
target_1_0 = lambda x: ['No', 'Yes'].index(x)
y_true_test = y_test.apply(target_1_0).values
train_X = X_train.copy()
train_X[target] = y_train
test_X = X_test.copy()
test_X[target] = y_test
## Convert to h2o Frame
train_h2o = h2o.H2OFrame(train_X, destination_frame='train.hex')
test_h2o = h2o.H2OFrame(test_X, destination_frame='test.hex')
###Output
Parse progress: |█████████████████████████████████████████████████████████| 100%
Parse progress: |█████████████████████████████████████████████████████████| 100%
###Markdown
H2O - Build Machine Learning Models H2O - RANDOM FOREST- Accuracy: 71,78%
###Code
## Random Forest
model_rf = H2ORandomForestEstimator(seed = SEED)
model_rf.train(
x = features,
y = target,
training_frame = train_h2o,
model_id = 'fit_rf.model'
)
ypred_RF_df = model_rf.predict(test_h2o).as_data_frame()
# model_rf.model_performance()
print('Random Forest')
y_pred_RF = ypred_RF_df['predict'].apply(target_1_0).values
# print_confusion_matrix(y_true, y_pred)
print('Accuracy score: ', accuracy_score(y_true_test, y_pred_RF))
###Output
drf Model Build progress: |███████████████████████████████████████████████| 100%
drf prediction progress: |████████████████████████████████████████████████| 100%
Random Forest
Accuracy score: 0.7178494623655914
###Markdown
H2O GBM - Accuracy: 77,07%
###Code
model_gbm = H2OGradientBoostingEstimator(seed = SEED)
model_gbm.train(
x = features,
y = target,
training_frame = train_h2o,
model_id = 'fit_gbm.model'
)
# model_gbm.model_performance()
ypred_GBM_df = model_gbm.predict(test_h2o).as_data_frame()
y_pred = ypred_GBM_df['predict'].copy().apply(target_1_0).values
print('GBM')
# print_confusion_matrix(y_true, y_pred)
print('Accuracy score: ', accuracy_score(y_true_test, y_pred))
###Output
gbm Model Build progress: |███████████████████████████████████████████████| 100%
gbm prediction progress: |████████████████████████████████████████████████| 100%
GBM
Accuracy score: 0.770752688172043
###Markdown
H2O - GLM (generalized linear model)- Accuracy: 76,64%
###Code
model_glm = H2OGeneralizedLinearEstimator(seed = SEED, family='binomial')
model_glm.train(
x = features,
y = target,
training_frame = train_h2o,
model_id = 'fit_glm.model'
)
# model_glm.model_performance()
ypred_GLM_df = model_glm.predict(test_h2o).as_data_frame()
y_pred = ypred_GLM_df['predict'].copy().apply(target_1_0).values
print('GLM')
# print_confusion_matrix(y_true, y_pred)
print('Accuracy score: ', accuracy_score(y_true_test, y_pred))
###Output
glm Model Build progress: |███████████████████████████████████████████████| 100%
glm prediction progress: |████████████████████████████████████████████████| 100%
GLM
Accuracy score: 0.7664516129032258
###Markdown
H2O - XGB- Accuracy: 79,18%
###Code
model_xgb = H2OXGBoostEstimator(seed = SEED)
model_xgb.train(
x = features,
y = target,
training_frame = train_h2o,
model_id = 'fit_xgb.model'
)
# model_xgb.model_performance()
ypred_XGB_df = model_xgb.predict(test_h2o).as_data_frame()
y_pred = ypred_XGB_df['predict'].copy().apply(target_1_0).values
print('XGB')
# print_confusion_matrix(y_true, y_pred)
print('Accuracy score: ', accuracy_score(y_true_test, y_pred))
###Output
xgboost Model Build progress: |███████████████████████████████████████████| 100%
xgboost prediction progress: |████████████████████████████████████████████| 100%
XGB
Accuracy score: 0.7918279569892474
###Markdown
Export model - the H2O xgb model has the highest accuracy score
###Code
## H2O export model
export_model_path = h2o.save_model(model=model_xgb, path="./ML_models/model_xgb_v1/", force=True)
print('Export done!')
###Output
Export done!
###Markdown
Load the model and run prediction again to test the results
###Code
h2o_model_xgb = h2o.load_model(export_model_path)
# model_xgb.model_performance()
ypred_XGB_df = h2o_model_xgb.predict(test_h2o).as_data_frame()
y_pred = ypred_XGB_df['predict'].copy().apply(target_1_0).values
print('XGB')
# print_confusion_matrix(y_true, y_pred)
print('Accuracy score: ', accuracy_score(y_true_test, y_pred))
###Output
xgboost prediction progress: |████████████████████████████████████████████| 100%
XGB
Accuracy score: 0.7918279569892474
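###Markdown
The `print_confusion_matrix` helper referenced in the commented-out lines above is not defined in this excerpt. A minimal sketch of such a helper is shown below, added for illustration only; it reuses the sklearn metrics imported at the top, and `classification_report` also prints the recall metric discussed in the introduction.
###Code
# Minimal sketch of a confusion-matrix helper (illustrative, not the original one)
def print_confusion_matrix(y_true, y_pred, labels=('No churn', 'Churn')):
    cm = confusion_matrix(y_true, y_pred)
    print(pd.DataFrame(cm,
                       index=['true_' + l for l in labels],
                       columns=['pred_' + l for l in labels]))
    print(classification_report(y_true, y_pred, target_names=list(labels)))

print_confusion_matrix(y_true_test, y_pred)
###Output
_____no_output_____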
###Markdown
Evaluation report
Confusion matrix - associated with XGB
- Accuracy: 79,18%
Plot Feature importance - H2O - xgb
###Code
model_xgb.varimp_plot()
###Output
_____no_output_____
###Markdown
Summary with h2o
- The xgb achieved the best accuracy and was exported to be used later
The most important features, i.e. characteristics that influence customer churn, are:
- Contract_Month-to_Month
- MonthlyCharges
- TotalCharges and
- tenure
Let's move on with the ML model built using the Apache Spark framework in the next notebook
###Code
# !jupyter nbconvert --to html Phase_2_Build_ML_models_with_Python_3x6_h2o_ai.ipynb
###Output
_____no_output_____ |
vision/visualisation_solution.ipynb | ###Markdown
Part II: Visualise saliency maps
- Import an already trained baseline model.
- Visualise the gradients of class probabilities w.r.t. inputs to obtain saliency maps.
- Generate inputs that maximise class probabilities.
Exercises:
1. Retrieve the gradient of the most probable class w.r.t. the input image using `tf.gradients` and plot saliency maps.
2. Iterate the above and take steps in the direction of this gradient, starting from a test image.
>* The gradient indicates how to modify the input image to make it look more like the class it is taken from, according to the network.
>* Note that the network weights are kept fixed, only the input is transformed, i.e. we retrieve gradients, but we never apply them to the network weights.
Imports
###Code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import tensorflow as tf
# Don't forget to select GPU runtime environment in Runtime -> Change runtime type
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
# we will use Sonnet on top of TF
!pip install -q dm-sonnet
import sonnet as snt
import numpy as np
# Plotting library.
from matplotlib import pyplot as plt
import pylab as pl
from IPython import display
from skimage import data, color
from skimage.transform import rescale, resize, downscale_local_mean
# Reset graph
tf.reset_default_graph()
# Display function
class_mapping = [u'airplane', u'automobile', u'bird', u'cat', u'deer',
u'dog', u'frog', u'horse', u'ship', u'truck']
def gallery(maps, imgs, pclass, gt, scale=4.0):
num_images= maps.shape[0]
maps = np.abs(maps).mean(axis=-1)
ff, axes = plt.subplots(2, num_images,
subplot_kw={'xticks': [],
'yticks': []})
for i in range(0, num_images):
tt_pred = class_mapping[pclass[i]]
tt_gt = class_mapping[gt[i]]
mm = maps[i]/np.amax(maps[i])
mm_rescale = rescale(mm, scale)
axes[0,i].imshow(mm_rescale)
img = (imgs[i]+1.0)/2.0
img_rescale = rescale(img, scale)
axes[1,i].imshow(img_rescale)
plt.setp(axes[0,i].get_xticklabels(), visible=False)
plt.setp(axes[0,i].get_yticklabels(), visible=False)
axes[0,i].set_title('pred={}'.format(tt_pred))
axes[1,i].set_title('gt={}'.format(tt_gt))
plt.show()
###Output
_____no_output_____
###Markdown
Copy the pretrained weights of the baseline model onto the virtual machine
- you need to load all three files from the *baseline* folder (it will take about 5 minutes)
- this loads a model with the same architecture that you defined earlier, but fully trained.
###Code
from google.colab import files
uploaded = files.upload()
print(uploaded)
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
###Output
_____no_output_____
###Markdown
Get dataset to be used for visualisation
- CIFAR-10: the equivalent of MNIST for natural RGB images
- 60000 32x32 colour images in 10 classes: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck
- train: 50000; test: 10000
###Code
cifar10 = tf.keras.datasets.cifar10
# (down)load dataset
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
###Output
_____no_output_____
###Markdown
Retrieve batches from the test set
###Code
# define dimension of the batches to sample from the datasets
BATCH_SIZE_TEST = 5 #@param
dataset_test = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
batched_dataset_test = dataset_test.repeat().batch(BATCH_SIZE_TEST)
iterator_test = batched_dataset_test.make_one_shot_iterator()
(batch_test_images, batch_test_labels) = iterator_test.get_next()
###Output
_____no_output_____
###Markdown
Model on which we will run the visualisation
###Code
class Baseline(snt.AbstractModule):
def __init__(self, num_classes, name="baseline"):
super(Baseline, self).__init__(name=name)
self._num_classes = num_classes
self._output_channels = [
64, 64, 128, 128, 128, 256, 256, 256, 512, 512, 512
]
self._num_layers = len(self._output_channels)
self._kernel_shapes = [[3, 3]] * self._num_layers # All kernels are 3x3.
self._strides = [1, 1, 2, 1, 1, 2, 1, 1, 2, 1, 1]
self._paddings = [snt.SAME] * self._num_layers
def _build(self, inputs, is_training=None, test_local_stats=False):
net = inputs
# instantiate all the convolutional layers
layers = [snt.Conv2D(name="conv_2d_{}".format(i),
output_channels=self._output_channels[i],
kernel_shape=self._kernel_shapes[i],
stride=self._strides[i],
padding=self._paddings[i],
use_bias=True) for i in xrange(self._num_layers)]
# connect them to the graph, adding batch norm and non-linearity
for i, layer in enumerate(layers):
net = layer(net)
bn = snt.BatchNorm(name="batch_norm_{}".format(i))
net = bn(net, is_training=is_training, test_local_stats=test_local_stats)
net = tf.nn.relu(net)
net = tf.reduce_mean(net, reduction_indices=[1, 2], keepdims=False,
name="avg_pool")
logits = snt.Linear(self._num_classes)(net)
return logits
num_classes = 10
# Test preprocessing: only scale to [-1,1].
def test_image_preprocess():
def fn(image):
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = image * 2.0 - 1.0
return image
return fn
# Instantiate the model
with tf.variable_scope("baseline"):
model = Baseline(num_classes)
# Connect the model to data
preprocess_op = test_image_preprocess()
batch_test_images = preprocess_op(batch_test_images)
test_predictions = model(batch_test_images, is_training=False)
# Create saver to restore the pre-trained model
# First remove the scope name from variables name, since the name in the checkpoint doesn't include it
var_list = snt.get_variables_in_scope("baseline",
collection=tf.GraphKeys.GLOBAL_VARIABLES)
var_map = {}
for i in range(0, len(var_list)):
name = var_list[i].name[len("baseline/"):-2]
var_map[name] = var_list[i]
saver = tf.train.Saver(var_map, reshape=True)
# For evaluation, we look at top_k_accuracy since it's easier to interpret; normally k=1 or k=5
def top_k_accuracy(k, labels, logits):
in_top_k = tf.nn.in_top_k(predictions=tf.squeeze(logits),
targets=tf.squeeze(tf.cast(labels, tf.int32)), k=k)
return tf.reduce_mean(tf.cast(in_top_k, tf.float32))
test_acc = top_k_accuracy(1, batch_test_labels, test_predictions)
###Output
_____no_output_____
###Markdown
Visualise saliency maps- We retrieve gradients w.r.t. inputs to obtain a saliency map over the input pixels, i.e. to understand which pixels in an image caused a certain output logit to be maximised.
###Code
# Get the maximum output prediction
maximum_prediction = tf.reduce_max(test_predictions, 1)
# Get the gradient w.r.t. input images
saliency_op = tf.gradients(maximum_prediction, batch_test_images)[:][0]
# Get the predicted class index for visualisation purposes.
pred_class_op = tf.argmax(test_predictions, axis=-1)
# Create the session and initialize variables
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# Restore pre-trained weights
saver.restore(sess, "baseline.ckpt")
# Check if import was done correctly by running eval on cifar test set
# expected_accuracy = 0.94
num_batches = 1000 # 1000 batches * 5 samples per batch = 5000
avg_accuracy = 0.0
for _ in range(num_batches):
accuracy = sess.run(test_acc)
avg_accuracy += accuracy
avg_accuracy /= num_batches
print ("Accuracy {:.3f}".format(avg_accuracy))
# Get saliency maps
smap, inp_img, predicted_class, ground_truth = sess.run(
[saliency_op, batch_test_images,
pred_class_op, tf.squeeze(batch_test_labels)])
# Display
gallery(smap, inp_img, predicted_class, ground_truth)
###Output
_____no_output_____
###Markdown
Not that impressive, right? Let's generate the image that maximises the probability of a given class $c$.
The previous exercise computed
$$\frac{\partial y_{c}}{\partial x}$$
Now we modify $x$ to search for $\hat x$ that maximises $y_{c}$, using an iterative gradient-ascent-like approach:
$$x_{t+1} = \min\left(1, \max\left(-1, x_t + \alpha \frac{\partial y_{c}}{\partial x}\Big|_{x_t}\right)\right), \quad t \in \{0, \dots, N\}$$
$$x_0 = \text{initial test image from class } c$$
Use e.g. $\alpha = 0.1$ and $N=10000$.
###Code
alpha = 0.1
N = 10000
# get saliency maps
smap, inp_img, predicted_class, ground_truth = sess.run(
[saliency_op, batch_test_images,
pred_class_op, tf.squeeze(batch_test_labels)])
for t in range(N):
inp_img = inp_img + alpha * smap
inp_img = np.minimum(1, np.maximum(-1, inp_img))
smap = sess.run(saliency_op,
feed_dict={batch_test_images: inp_img})
# display transformed input image at every 1000 iterations
if t % 1000 == 0:
print ('Transformed input at iter {0:5d} out of {1:5d}'.format(int(t), int(N)))
gallery(smap, inp_img, predicted_class, ground_truth)
###Output
_____no_output_____ |
Chapter3_Exercise5.ipynb | ###Markdown
Chapter 3 - Exercise 5: Compute the median of height based on position. Knowledge used in this exercise: working with arrays 1. Filtering array values by a condition 2. Computing statistics on an array. Given the two files heights.txt and positions.txt => copy the data from the two files into two lists, heights and positions, then carry out the requirements and compare against the provided results: 'GK' (goalkeeper), 'M' (midfield), 'A' (attack) and 'D' (defense).
###Code
import numpy as np
heights = [191, 184, 185, 180, 181, 187, 170, 179, 183, 186, 185, 170, 187, 183, 173, 188, 183, 180, 188, 175, 193, 180, 185, 170, 183, 173, 185, 185, 168, 190, 178, 185, 185, 193, 183, 184, 178, 180, 177, 188, 177, 187, 186, 183, 189, 179, 196, 190, 189, 188, 188, 188, 182, 185, 184, 178, 185, 193, 188, 179, 189, 188, 180, 178, 186, 188, 180, 185, 172, 179, 180, 174, 183, 178, 187, 178, 193, 181, 180, 187, 179, 173, 175, 188, 187, 175, 171, 179, 180, 188, 185, 196, 183, 184, 186, 178, 188, 168, 176, 178, 178, 192, 172, 170, 190, 175, 174, 179, 177, 187, 184, 185, 175, 193, 185, 191, 181, 183, 176, 176, 182, 192, 187, 170, 189, 171, 181, 183, 178, 182, 186, 191, 175, 179, 180, 181, 178, 193, 179, 181, 186, 190, 190, 192, 185, 178, 182, 171, 182, 173, 192, 175, 183, 183, 184, 176, 183, 186, 178, 185, 188, 193, 193, 170, 188, 196, 175, 180, 184, 173, 180, 190, 186, 182, 183, 195, 188, 187, 190, 180, 194, 182, 182, 183, 178, 183, 171, 185, 177, 180, 195, 173, 185, 186, 187, 178, 185, 174, 175, 176, 191, 170, 183, 180, 174, 191, 179, 178, 187, 191, 183, 180, 184, 183, 180, 185, 184, 181, 186, 185, 182, 175, 173, 175, 176, 174, 184, 177, 185, 162, 180, 171, 183, 180, 180, 191, 196, 191, 176, 186, 171, 190, 188, 180, 185, 176, 187, 188, 182, 178, 176, 175, 177, 191, 183, 189, 173, 180, 180, 185, 185, 180, 181, 183, 180, 185, 175, 175, 177, 177, 182, 167, 176, 180, 194, 180, 187, 174, 182, 174, 181, 188, 188, 180, 183, 183, 184, 188, 170, 182, 183, 170, 186, 191, 187, 188, 177, 180, 182, 174, 183, 178, 182, 190, 180, 182, 181, 180, 176, 172, 186, 180, 185, 186, 179, 185, 180, 187, 181, 185, 181, 183, 181, 175, 187, 178, 182, 182, 183, 184, 170, 178, 175, 186, 175, 178, 185, 178, 190, 187, 173, 186, 177, 193, 183, 175, 185, 179, 167, 175, 183, 188, 184, 191, 184, 170, 169, 175, 175, 185, 193, 172, 179, 180, 179, 186, 180, 176, 190, 175, 175, 186, 196, 186, 187, 182, 178, 185, 183, 191, 183, 185, 186, 180, 169, 185, 194, 186, 183, 183, 191, 189, 194, 174, 168, 185, 160, 191, 185, 186, 179, 188, 185, 189, 183, 183, 176, 183, 180, 171, 187, 175, 190, 178, 175, 181, 185, 188, 180, 171, 184, 176, 181, 183, 178, 171, 187, 186, 186, 174, 174, 186, 193, 191, 180, 181, 177, 195, 190, 185, 168, 183, 175, 191, 184, 182, 188, 182, 180, 192, 191, 185, 188, 180, 179, 183, 192, 183, 183, 180, 173, 180, 190, 183, 182, 175, 180, 178, 181, 188, 175, 180, 183, 191, 183, 180, 182, 178, 189, 183, 183, 178, 170, 178, 173, 180, 184, 180, 188, 180, 184, 191, 188, 195, 197, 186, 191, 189, 196, 185, 178, 200, 176, 184, 189, 181, 185, 184, 191, 191, 184, 190, 190, 170, 183, 183, 169, 183, 185, 178, 183, 186, 190, 186, 188, 186, 183, 179, 172, 185, 180, 183, 189, 180, 182, 185, 180, 193, 185, 175, 182, 182, 180, 185, 180, 188, 175, 183, 185, 185, 176, 189, 186, 181, 181, 185, 188, 176, 179, 178, 178, 180, 185, 183, 183, 185, 186, 185, 188, 172, 175, 186, 181, 190, 177, 184, 191, 173, 178, 180, 185, 183, 186, 175, 189, 189, 189, 189, 183, 166, 178, 175, 179, 185, 180, 190, 181, 185, 179, 185, 188, 183, 173, 180, 181, 175, 182, 177, 182, 180, 182, 184, 181, 177, 178, 180, 183, 194, 185, 191, 180, 187, 181, 183, 183, 180, 185, 178, 177, 183, 178, 173, 183, 191, 188, 188, 178, 175, 186, 183, 180, 184, 184, 194, 174, 178, 193, 175, 190, 186, 186, 180, 186, 183, 177, 180, 175, 184, 184, 178, 166, 183, 186, 168, 178, 181, 188, 187, 180, 172, 185, 186, 191, 172, 184, 186, 192, 180, 177, 183, 175, 180, 170, 180, 188, 180, 178, 196, 192, 186, 175, 184, 175, 171, 187, 170, 183, 184, 178, 187, 179, 177, 172, 180, 170, 177, 184, 185, 
191, 188, 193, 183, 188, 185, 183, 185, 187, 189, 188, 174, 173, 172, 179, 171, 176, 173, 185, 183, 187, 178, 176, 187, 171, 185, 174, 186, 179, 192, 173, 183, 183, 183, 186, 184, 185, 171, 184, 189, 183, 173, 184, 183, 184, 184, 179, 184, 185, 181, 170, 176, 191, 173, 183, 178, 189, 183, 187, 202, 180, 183, 186, 182, 186, 182, 190, 178, 185, 181, 186, 171, 183, 185, 184, 190, 167, 175, 172, 190, 168, 180, 188, 191, 178, 178, 175, 183, 191, 183, 182, 187, 181, 175, 186, 175, 189, 180, 188, 180, 183, 179, 184, 178, 185, 185, 182, 179, 183, 170, 183, 178, 187, 184, 168, 186, 183, 179, 186, 170, 178, 184, 191, 187, 174, 178, 186, 184, 193, 188, 185, 188, 173, 175, 195, 180, 187, 182, 183, 188, 173, 197, 173, 187, 184, 190, 188, 174, 190, 185, 182, 191, 187, 193, 173, 180, 172, 176, 191, 187, 184, 184, 199, 175, 191, 190, 183, 192, 191, 189, 174, 185, 184, 185, 185, 193, 183, 189, 177, 183, 188, 170, 185, 178, 188, 178, 170, 193, 173, 173, 180, 180, 175, 173, 185, 185, 189, 176, 173, 183, 175, 179, 193, 188, 183, 183, 175, 183, 176, 180, 185, 180, 187, 180, 177, 196, 175, 176, 188, 187, 183, 173, 191, 183, 188, 186, 176, 173, 171, 179, 173, 192, 182, 180, 191, 182, 192, 185, 192, 186, 179, 178, 186, 179, 176, 182, 184, 178, 182, 182, 190, 183, 188, 187, 183, 172, 175, 182, 179, 174, 188, 186, 174, 191, 180, 188, 183, 183, 184, 180, 175, 188, 181, 188, 186, 188, 175, 188, 178, 180, 175, 185, 185, 176, 184, 173, 182, 176, 185, 194, 185, 177, 184, 171, 186, 184, 178, 180, 187, 186, 180, 190, 188, 182, 174, 193, 178, 184, 170, 166, 176, 168, 200, 180, 182, 192, 167, 186, 178, 175, 174, 188, 184, 189, 174, 193, 182, 194, 183, 170, 170, 173, 184, 178, 177, 178, 172, 169, 191, 175, 176, 178, 183, 181, 175, 191, 181, 177, 170, 180, 184, 186, 178, 191, 183, 178, 188, 180, 178, 178, 193, 177, 183, 179, 170, 183, 179, 184, 184, 174, 190, 191, 188, 180, 185, 183, 194, 183, 178, 180, 183, 171, 178, 184, 190, 185, 185, 173, 188, 185, 178, 173, 189, 194, 169, 179, 170, 183, 188, 173, 190, 182, 191, 176, 179, 192, 189, 183, 180, 178, 194, 178, 180, 185, 183, 184, 181, 184, 170, 183, 179, 179, 172, 178, 188, 187, 170, 178, 186, 180, 185, 175, 173, 175, 173, 167, 173, 181, 188, 180, 180, 184, 164, 170, 179, 179, 173, 178, 182, 187, 179, 175, 191, 180, 180, 183, 172, 187, 179, 184, 167, 182, 175, 193, 188, 189, 182, 165, 173, 181, 183, 180, 180, 183, 183, 183, 180, 173, 180, 190, 185, 183, 167, 191, 185, 185, 182, 178, 183, 183, 184, 189, 182, 186, 178, 187, 182, 185, 182, 191, 185, 185, 191, 173, 180, 168, 187, 182, 183, 183, 186, 174, 193, 188, 185, 199, 186, 174, 170, 189, 186, 176, 178, 188, 175, 178, 173, 177, 189, 178, 183, 176, 185, 198, 175, 183, 180, 194, 175, 181, 174, 183, 188, 185, 175, 174, 171, 175, 189, 182, 189, 177, 183, 185, 183, 178, 185, 177, 175, 172, 181, 170, 179, 170, 164, 166, 176, 176, 191, 169, 175, 184, 184, 168, 178, 179, 177, 185, 171, 179, 173, 182, 183, 193, 191, 189, 176, 185, 177, 172, 177, 188, 178, 185, 181, 175, 181, 183, 175, 177, 180, 181, 174, 182, 185, 173, 185, 173, 188, 189, 188, 173, 180, 182, 190, 180, 181, 174, 184, 182, 177, 182, 188, 175, 176, 184, 187, 193, 175, 185, 181, 186, 182, 180, 178, 182, 175, 184, 184, 182, 180, 182, 178, 183, 168, 183, 186, 191, 185, 177, 186, 172, 181, 176, 181, 185, 185, 182, 185, 177, 177, 180, 175, 188, 174, 177, 179, 171, 170, 185, 186, 168, 180, 185, 176, 182, 188, 180, 179, 194, 181, 181, 181, 188, 182, 177, 191, 176, 182, 183, 176, 184, 175, 196, 177, 175, 179, 187, 181, 175, 174, 178, 192, 178, 183, 182, 167, 187, 185, 179, 166, 
180, 190, 176, 177, 171, 181, 187, 185, 176, 174, 179, 188, 178, 173, 188, 180, 178, 185, 177, 172, 178, 184, 193, 185, 187, 190, 188, 189, 177, 180, 175, 180, 178, 185, 194, 188, 182, 170, 176, 190, 168, 186, 172, 177, 176, 181, 185, 175, 180, 185, 186, 193, 178, 185, 189, 190, 185, 182, 191, 178, 187, 175, 193, 178, 182, 179, 178, 187, 174, 179, 191, 170, 178, 180, 193, 182, 176, 176, 176, 186, 187, 175, 187, 187, 176, 184, 173, 186, 190, 191, 187, 186, 196, 186, 175, 194, 184, 193, 192, 172, 179, 190, 183, 192, 182, 184, 183, 186, 172, 172, 175, 192, 187, 198, 178, 172, 190, 185, 182, 196, 185, 182, 183, 184, 188, 181, 175, 176, 175, 191, 190, 174, 184, 180, 181, 184, 177, 183, 174, 180, 175, 179, 179, 177, 177, 175, 175, 182, 188, 172, 181, 185, 176, 180, 180, 195, 178, 180, 183, 186, 185, 175, 181, 180, 186, 188, 189, 193, 190, 185, 189, 191, 187, 182, 192, 181, 170, 183, 176, 188, 191, 177, 172, 177, 188, 181, 178, 178, 168, 178, 182, 189, 174, 185, 185, 183, 186, 188, 182, 186, 174, 179, 187, 185, 177, 188, 192, 183, 172, 191, 184, 168, 186, 177, 180, 199, 189, 180, 189, 178, 172, 185, 180, 171, 190, 186, 185, 173, 178, 179, 182, 184, 182, 179, 196, 182, 185, 184, 180, 179, 178, 185, 178, 184, 173, 171, 172, 185, 184, 178, 180, 175, 185, 188, 196, 180, 173, 178, 175, 182, 188, 183, 185, 177, 183, 190, 184, 186, 175, 188, 188, 171, 183, 185, 196, 185, 170, 183, 183, 170, 173, 180, 180, 188, 185, 178, 173, 185, 185, 180, 188, 185, 177, 182, 185, 184, 177, 168, 183, 188, 188, 171, 188, 191, 186, 183, 184, 180, 177, 187, 178, 180, 179, 189, 192, 187, 186, 185, 193, 179, 185, 190, 182, 185, 180, 185, 191, 173, 191, 177, 183, 175, 198, 185, 173, 178, 180, 193, 178, 176, 175, 180, 182, 191, 175, 177, 184, 185, 185, 198, 180, 188, 176, 185, 193, 173, 173, 185, 191, 188, 178, 183, 191, 192, 178, 183, 192, 175, 180, 165, 180, 180, 178, 182, 181, 192, 186, 186, 170, 183, 186, 185, 178, 189, 189, 181, 175, 172, 187, 185, 175, 180, 178, 191, 180, 188, 193, 169, 180, 170, 185, 185, 188, 180, 175, 180, 183, 175, 177, 174, 182, 184, 180, 184, 180, 178, 183, 184, 193, 175, 174, 175, 188, 183, 185, 178, 188, 175, 172, 185, 186, 186, 182, 177, 185, 176, 175, 180, 172, 175, 182, 186, 176, 182, 175, 183, 180, 184, 190, 188, 186, 185, 172, 175, 172, 172, 182, 174, 188, 190, 194, 168, 185, 188, 183, 185, 185, 178, 171, 173, 180, 200, 178, 178, 164, 182, 186, 195, 191, 186, 185, 173, 180, 185, 177, 178, 180, 184, 186, 183, 186, 183, 174, 178, 181, 183, 185, 174, 184, 192, 181, 174, 186, 191, 180, 188, 188, 188, 182, 193, 193, 179, 183, 182, 182, 183, 184, 184, 185, 168, 175, 185, 173, 181, 184, 186, 191, 179, 181, 183, 181, 196, 184, 186, 184, 181, 188, 180, 186, 180, 183, 184, 189, 182, 185, 183, 186, 193, 188, 188, 188, 180, 193, 186, 185, 185, 183, 180, 198, 178, 178, 185, 180, 182, 182, 185, 173, 180, 185, 191, 175, 180, 174, 183, 183, 181, 190, 169, 170, 182, 172, 180, 182, 186, 183, 191, 185, 185, 178, 188, 187, 175, 180, 198, 190, 192, 183, 190, 181, 170, 189, 186, 188, 178, 186, 180, 175, 180, 163, 182, 177, 183, 177, 172, 173, 165, 172, 173, 177, 184, 183, 179, 174, 170, 192, 188, 191, 191, 185, 191, 175, 185, 185, 178, 165, 163, 180, 178, 180, 175, 179, 176, 183, 186, 180, 187, 171, 170, 177, 185, 176, 182, 176, 180, 170, 183, 183, 180, 192, 178, 178, 180, 180, 165, 168, 192, 178, 185, 179, 181, 193, 186, 175, 175, 191, 190, 175, 172, 176, 189, 184, 166, 180, 183, 193, 187, 175, 190, 184, 184, 177, 178, 176, 171, 183, 184, 176, 189, 180, 181, 170, 187, 185, 173, 183, 180, 172, 178, 183, 180, 180, 
187, 178, 179, 187, 179, 181, 182, 182, 187, 180, 190, 178, 174, 190, 173, 185, 173, 189, 193, 184, 185, 171, 192, 177, 180, 174, 179, 180, 172, 196, 175, 185, 178, 175, 186, 178, 185, 188, 182, 188, 183, 189, 185, 193, 190, 177, 193, 184, 176, 181, 192, 185, 174, 193, 176, 185, 188, 179, 187, 192, 183, 188, 178, 185, 178, 169, 184, 193, 173, 185, 177, 178, 185, 186, 183, 182, 183, 178, 183, 165, 178, 177, 182, 180, 190, 179, 177, 184, 183, 183, 177, 179, 188, 186, 187, 175, 186, 182, 182, 189, 184, 176, 180, 172, 189, 174, 185, 190, 186, 177, 183, 180, 178, 191, 185, 178, 189, 189, 190, 185, 187, 185, 178, 176, 176, 173, 176, 188, 178, 193, 181, 197, 180, 186, 178, 184, 187, 184, 190, 185, 190, 187, 180, 184, 171, 196, 185, 176, 186, 193, 173, 178, 183, 168, 186, 184, 189, 177, 170, 189, 188, 176, 183, 178, 183, 173, 180, 181, 178, 179, 190, 177, 187, 174, 184, 179, 188, 190, 190, 176, 187, 173, 180, 168, 170, 188, 184, 180, 185, 176, 179, 180, 176, 185, 175, 170, 170, 180, 187, 172, 178, 182, 180, 181, 180, 180, 200, 186, 178, 186, 191, 176, 178, 183, 184, 175, 181, 165, 173, 171, 180, 178, 175, 185, 180, 177, 190, 178, 191, 185, 188, 173, 183, 184, 176, 177, 184, 178, 183, 180, 187, 182, 172, 166, 185, 185, 180, 197, 181, 188, 181, 178, 183, 176, 185, 178, 190, 178, 196, 188, 187, 183, 172, 183, 198, 186, 191, 184, 189, 178, 182, 182, 178, 180, 169, 177, 172, 175, 178, 187, 187, 185, 187, 173, 188, 176, 170, 185, 184, 173, 185, 180, 187, 180, 190, 180, 183, 176, 167, 171, 185, 175, 182, 186, 178, 172, 177, 175, 181, 185, 189, 182, 182, 182, 178, 185, 183, 188, 177, 178, 192, 182, 195, 183, 180, 177, 180, 178, 178, 182, 188, 182, 188, 188, 178, 178, 183, 175, 183, 179, 178, 191, 197, 180, 178, 188, 187, 185, 188, 187, 184, 183, 171, 184, 188, 185, 175, 191, 185, 183, 173, 180, 191, 183, 186, 180, 183, 193, 176, 185, 188, 188, 191, 185, 184, 176, 188, 187, 176, 193, 181, 177, 183, 184, 181, 185, 183, 192, 185, 175, 180, 183, 182, 173, 196, 180, 188, 185, 194, 172, 175, 178, 182, 193, 188, 178, 178, 178, 180, 189, 177, 186, 185, 183, 186, 176, 185, 183, 175, 178, 187, 190, 190, 184, 187, 173, 185, 173, 193, 188, 183, 185, 174, 183, 175, 180, 186, 180, 185, 178, 188, 178, 186, 188, 180, 183, 192, 185, 188, 180, 183, 185, 183, 188, 180, 174, 175, 178, 185, 180, 188, 180, 180, 185, 185, 173, 180, 183, 174, 186, 183, 180, 188, 176, 184, 180, 188, 176, 188, 173, 188, 180, 180, 178, 186, 187, 188, 176, 182, 189, 187, 184, 188, 180, 197, 178, 174, 180, 175, 170, 180, 183, 185, 180, 185, 179, 183, 185, 193, 188, 175, 190, 180, 170, 175, 185, 170, 187, 180, 179, 165, 184, 184, 183, 186, 174, 170, 180, 185, 172, 175, 175, 175, 173, 185, 173, 185, 188, 188, 185, 180, 173, 183, 181, 174, 187, 179, 194, 183, 170, 170, 173, 180, 187, 187, 187, 185, 185, 182, 170, 186, 178, 187, 180, 179, 178, 180, 180, 171, 188, 180, 186, 185, 178, 188, 187, 180, 175, 170, 183, 179, 186, 191, 172, 193, 191, 186, 175, 187, 182, 181, 169, 188, 186, 183, 183, 180, 184, 183, 171, 183, 183, 174, 191, 193, 183, 178, 167, 178, 183, 173, 180, 163, 188, 181, 188, 188, 188, 184, 191, 178, 175, 193, 185, 165, 175, 183, 191, 183, 185, 183, 185, 180, 178, 180, 174, 180, 180, 191, 178, 185, 183, 178, 178, 183, 188, 183, 183, 180, 168, 183, 183, 191, 183, 185, 182, 185, 173, 188, 178, 175, 188, 190, 182, 174, 175, 176, 188, 183, 185, 180, 182, 194, 175, 185, 176, 180, 192, 184, 183, 173, 189, 190, 187, 179, 171, 185, 178, 189, 175, 181, 196, 176, 177, 184, 183, 184, 187, 188, 183, 183, 175, 196, 188, 183, 185, 192, 191, 183, 185, 177, 
174, 176, 182, 183, 181, 177, 176, 187, 180, 182, 168, 180, 183, 173, 185, 178, 172, 178, 183, 180, 174, 185, 183, 174, 186, 183, 184, 178, 184, 188, 180, 162, 183, 183, 170, 177, 190, 175, 183, 179, 175, 188, 176, 180, 188, 180, 190, 180, 175, 191, 196, 185, 175, 167, 186, 167, 185, 186, 186, 168, 165, 179, 170, 189, 175, 184, 169, 186, 182, 175, 186, 172, 181, 177, 186, 176, 193, 175, 189, 180, 170, 184, 169, 178, 173, 186, 192, 173, 184, 185, 188, 180, 175, 190, 175, 181, 166, 191, 174, 180, 185, 193, 180, 183, 176, 180, 178, 193, 185, 175, 185, 190, 185, 188, 185, 188, 182, 176, 193, 180, 182, 183, 184, 185, 187, 185, 172, 188, 180, 174, 176, 181, 180, 179, 171, 184, 187, 193, 193, 187, 183, 180, 184, 202, 182, 176, 175, 176, 180, 180, 185, 177, 185, 167, 178, 184, 183, 181, 190, 184, 180, 180, 183, 178, 176, 187, 171, 185, 189, 193, 184, 174, 187, 192, 180, 178, 175, 188, 175, 177, 188, 185, 180, 192, 182, 178, 185, 173, 180, 178, 170, 193, 178, 176, 181, 178, 180, 178, 178, 188, 178, 183, 188, 175, 180, 188, 189, 195, 176, 178, 173, 182, 187, 183, 176, 187, 191, 180, 185, 189, 180, 186, 182, 188, 191, 195, 186, 191, 186, 177, 179, 185, 179, 192, 180, 186, 171, 178, 178, 181, 175, 182, 185, 190, 183, 193, 182, 178, 179, 172, 185, 176, 183, 175, 185, 184, 176, 180, 186, 185, 172, 186, 173, 184, 191, 196, 188, 188, 182, 186, 184, 176, 185, 178, 184, 181, 180, 180, 174, 183, 182, 173, 175, 178, 185, 175, 190, 180, 188, 178, 182, 175, 170, 181, 186, 170, 169, 177, 180, 183, 178, 177, 172, 175, 189, 180, 182, 179, 178, 188, 197, 168, 180, 187, 173, 180, 178, 175, 183, 198, 191, 191, 169, 179, 173, 178, 174, 182, 176, 186, 178, 175, 174, 180, 185, 185, 177, 183, 187, 185, 183, 185, 178, 188, 189, 191, 178, 178, 185, 193, 178, 180, 175, 178, 183, 172, 188, 183, 183, 185, 173, 191, 183, 174, 180, 178, 185, 185, 184, 184, 198, 178, 175, 180, 180, 175, 178, 183, 186, 185, 180, 178, 179, 183, 194, 171, 183, 181, 192, 191, 176, 178, 183, 172, 174, 185, 176, 188, 193, 175, 185, 180, 193, 191, 173, 175, 175, 181, 184, 176, 175, 185, 173, 193, 180, 180, 185, 185, 191, 180, 178, 178, 183, 174, 180, 185, 175, 196, 188, 186, 180, 176, 188, 175, 185, 185, 178, 191, 185, 178, 178, 183, 175, 175, 185, 186, 181, 185, 191, 186, 176, 178, 183, 171, 172, 190, 183, 184, 175, 185, 182, 188, 183, 187, 188, 181, 178, 174, 172, 178, 173, 185, 187, 188, 174, 179, 185, 185, 175, 183, 178, 161, 172, 179, 187, 177, 184, 185, 168, 180, 178, 185, 179, 172, 185, 190, 184, 174, 185, 193, 185, 175, 176, 173, 175, 181, 178, 185, 183, 170, 187, 182, 182, 185, 184, 189, 188, 178, 196, 186, 183, 179, 169, 181, 186, 187, 158, 188, 180, 174, 178, 185, 178, 191, 180, 180, 173, 173, 173, 175, 173, 173, 171, 169, 177, 178, 190, 181, 182, 180, 180, 190, 189, 181, 177, 183, 191, 181, 180, 185, 170, 185, 178, 187, 179, 172, 185, 183, 170, 187, 175, 193, 192, 184, 188, 183, 183, 178, 178, 173, 186, 169, 188, 191, 198, 190, 178, 183, 178, 183, 179, 183, 187, 181, 178, 181, 180, 178, 174, 167, 180, 170, 183, 177, 178, 187, 176, 186, 177, 191, 178, 175, 169, 188, 168, 180, 179, 182, 180, 181, 171, 178, 176, 186, 178, 180, 178, 191, 186, 183, 179, 201, 188, 178, 176, 190, 177, 181, 180, 188, 188, 186, 188, 189, 184, 188, 177, 176, 182, 188, 178, 170, 185, 190, 190, 187, 183, 176, 176, 181, 185, 173, 184, 176, 180, 177, 184, 179, 182, 183, 181, 185, 190, 181, 172, 196, 184, 190, 178, 183, 183, 190, 185, 180, 183, 181, 188, 185, 180, 170, 188, 186, 178, 180, 175, 182, 176, 189, 183, 174, 182, 192, 188, 180, 189, 193, 188, 188, 185, 173, 188, 
183, 187, 180, 188, 179, 173, 183, 178, 173, 190, 170, 181, 186, 180, 178, 178, 183, 180, 175, 183, 180, 181, 181, 180, 187, 185, 188, 184, 183, 179, 177, 184, 180, 184, 188, 170, 178, 175, 188, 175, 183, 175, 192, 186, 185, 192, 193, 182, 175, 165, 188, 182, 165, 172, 172, 185, 178, 183, 180, 187, 183, 193, 191, 182, 191, 181, 180, 176, 187, 167, 178, 186, 185, 188, 182, 178, 175, 170, 170, 178, 184, 168, 183, 187, 183, 188, 175, 180, 175, 183, 184, 180, 188, 180, 188, 183, 178, 193, 180, 186, 192, 180, 180, 175, 194, 170, 173, 178, 183, 185, 191, 176, 180, 185, 185, 193, 187, 177, 176, 180, 184, 178, 184, 176, 172, 178, 175, 170, 175, 187, 171, 175, 181, 180, 178, 178, 171, 185, 180, 188, 170, 184, 180, 175, 183, 178, 181, 172, 181, 174, 173, 182, 175, 196, 187, 185, 178, 173, 185, 178, 188, 192, 179, 177, 177, 185, 186, 188, 186, 182, 169, 176, 188, 189, 175, 186, 173, 174, 176, 180, 179, 178, 188, 172, 175, 190, 185, 188, 186, 183, 180, 190, 185, 185, 175, 184, 175, 178, 188, 178, 195, 192, 184, 184, 181, 185, 177, 178, 188, 173, 180, 183, 183, 183, 178, 188, 180, 185, 186, 175, 183, 192, 190, 188, 179, 185, 190, 171, 182, 175, 180, 185, 180, 180, 185, 177, 168, 168, 190, 175, 188, 182, 178, 183, 183, 173, 187, 182, 173, 186, 185, 188, 178, 178, 176, 180, 181, 185, 166, 189, 182, 179, 184, 173, 174, 178, 185, 182, 169, 183, 192, 180, 179, 180, 183, 181, 168, 185, 182, 188, 172, 183, 191, 180, 176, 173, 181, 183, 181, 179, 194, 172, 174, 173, 183, 181, 185, 181, 168, 181, 180, 193, 188, 172, 187, 180, 191, 175, 182, 172, 186, 186, 184, 174, 189, 172, 185, 185, 181, 185, 173, 185, 190, 191, 180, 179, 193, 169, 185, 188, 180, 178, 170, 183, 172, 174, 175, 187, 178, 189, 194, 170, 188, 179, 194, 187, 183, 183, 191, 170, 183, 173, 175, 185, 178, 180, 189, 168, 172, 184, 192, 174, 184, 177, 176, 179, 187, 182, 188, 184, 189, 168, 183, 178, 180, 180, 176, 174, 189, 179, 183, 186, 183, 173, 175, 183, 173, 187, 171, 178, 190, 183, 175, 191, 180, 178, 190, 167, 171, 181, 184, 173, 185, 182, 185, 175, 173, 184, 166, 181, 192, 174, 178, 178, 189, 184, 193, 183, 186, 191, 180, 183, 180, 189, 184, 185, 172, 183, 180, 185, 176, 170, 188, 187, 184, 184, 183, 185, 190, 182, 186, 190, 180, 182, 180, 183, 185, 191, 189, 178, 188, 180, 183, 173, 174, 173, 169, 178, 173, 185, 180, 186, 190, 194, 178, 193, 179, 185, 178, 184, 188, 175, 166, 179, 178, 175, 190, 183, 174, 172, 172, 187, 172, 180, 182, 193, 199, 192, 192, 167, 184, 185, 190, 184, 183, 189, 183, 183, 182, 168, 173, 184, 168, 183, 183, 179, 187, 180, 189, 185, 178, 176, 179, 182, 178, 188, 187, 182, 183, 191, 179, 190, 169, 186, 172, 186, 186, 185, 192, 186, 193, 174, 184, 187, 180, 180, 182, 172, 176, 183, 185, 179, 176, 182, 187, 184, 188, 184, 181, 190, 185, 180, 182, 183, 184, 190, 186, 176, 182, 182, 170, 186, 168, 178, 183, 198, 189, 182, 192, 165, 179, 190, 178, 170, 177, 171, 186, 183, 185, 186, 185, 187, 183, 190, 184, 181, 182, 185, 183, 184, 182, 188, 185, 184, 192, 191, 183, 173, 163, 183, 170, 180, 186, 189, 176, 183, 174, 183, 178, 175, 175, 183, 175, 178, 184, 192, 183, 170, 186, 178, 186, 180, 178, 190, 180, 180, 191, 176, 180, 170, 181, 180, 189, 188, 180, 196, 202, 195, 180, 187, 190, 178, 178, 191, 186, 175, 180, 184, 185, 186, 174, 172, 176, 191, 178, 183, 178, 184, 168, 192, 177, 177, 184, 175, 180, 179, 182, 184, 173, 180, 180, 178, 174, 186, 184, 188, 181, 173, 183, 175, 192, 183, 183, 183, 183, 196, 172, 191, 192, 170, 178, 187, 188, 185, 176, 184, 189, 180, 194, 177, 168, 184, 174, 188, 180, 184, 184, 188, 180, 185, 180, 
177, 170, 194, 202, 176, 180, 170, 175, 170, 175, 188, 174, 173, 186, 178, 185, 180, 180, 174, 186, 183, 183, 177, 183, 183, 180, 180, 172, 189, 180, 178, 180, 180, 183, 187, 182, 188, 193, 183, 179, 178, 180, 179, 182, 183, 178, 176, 170, 188, 178, 185, 180, 188, 185, 192, 183, 193, 181, 175, 185, 178, 194, 187, 178, 188, 170, 170, 180, 184, 185, 175, 180, 186, 189, 195, 188, 168, 183, 193, 183, 185, 188, 183, 186, 186, 174, 175, 180, 184, 175, 175, 175, 184, 170, 180, 176, 187, 193, 184, 183, 189, 191, 178, 185, 180, 180, 191, 183, 178, 193, 178, 184, 179, 173, 188, 180, 178, 187, 179, 187, 178, 183, 175, 187, 171, 188, 171, 183, 187, 188, 176, 169, 174, 191, 177, 168, 184, 183, 191, 191, 179, 170, 177, 191, 180, 186, 196, 171, 178, 185, 186, 180, 181, 187, 179, 175, 172, 188, 191, 197, 193, 165, 186, 195, 186, 181, 186, 185, 182, 175, 180, 174, 180, 180, 185, 185, 173, 178, 174, 193, 181, 172, 193, 187, 186, 168, 178, 183, 178, 169, 182, 176, 174, 179, 181, 179, 183, 188, 185, 193, 185, 181, 185, 183, 183, 175, 181, 172, 181, 178, 172, 184, 188, 186, 175, 178, 160, 184, 174, 178, 191, 176, 188, 171, 177, 181, 189, 175, 181, 183, 174, 186, 187, 181, 188, 187, 186, 173, 177, 187, 179, 188, 170, 178, 185, 175, 191, 185, 183, 173, 175, 182, 184, 185, 180, 183, 188, 171, 176, 180, 186, 178, 188, 186, 186, 193, 185, 181, 178, 183, 177, 183, 183, 176, 180, 183, 185, 172, 186, 177, 188, 168, 190, 188, 176, 195, 178, 181, 179, 187, 180, 179, 182, 184, 187, 180, 170, 195, 181, 178, 190, 169, 173, 181, 191, 193, 187, 183, 191, 188, 175, 192, 181, 183, 180, 185, 182, 185, 188, 184, 182, 191, 183, 190, 194, 177, 182, 184, 181, 175, 180, 178, 184, 175, 180, 181, 170, 183, 189, 176, 183, 174, 186, 194, 184, 181, 187, 181, 180, 181, 184, 191, 180, 175, 185, 168, 176, 180, 173, 176, 179, 182, 173, 181, 188, 186, 174, 183, 175, 183, 173, 181, 189, 188, 190, 174, 174, 186, 180, 180, 188, 175, 185, 190, 183, 183, 173, 180, 188, 183, 193, 178, 177, 187, 179, 184, 187, 180, 182, 191, 180, 176, 175, 170, 190, 184, 188, 184, 187, 175, 185, 173, 183, 187, 194, 180, 183, 175, 186, 184, 180, 183, 181, 173, 183, 190, 190, 182, 188, 173, 183, 190, 173, 183, 180, 184, 188, 188, 187, 183, 184, 188, 192, 178, 190, 172, 180, 176, 186, 174, 190, 183, 186, 184, 182, 180, 173, 182, 184, 178, 188, 182, 178, 184, 193, 186, 186, 191, 180, 188, 182, 191, 189, 184, 193, 177, 177, 183, 186, 173, 185, 171, 168, 184, 170, 175, 180, 173, 170, 188, 185, 190, 179, 193, 178, 182, 180, 190, 189, 183, 181, 186, 188, 189, 188, 187, 193, 191, 186, 168, 183, 182, 192, 193, 188, 191, 180, 188, 186, 176, 184, 182, 192, 184, 180, 175, 184, 173, 177, 182, 187, 192, 185, 170, 180, 171, 174, 183, 186, 188, 182, 190, 186, 180, 190, 175, 185, 181, 172, 189, 165, 173, 170, 189, 183, 180, 174, 173, 170, 182, 181, 160, 176, 178, 163, 179, 174, 191, 176, 171, 180, 173, 190, 193, 186, 183, 181, 178, 167, 179, 178, 180, 183, 182, 171, 188, 175, 182, 180, 183, 191, 183, 188, 172, 176, 180, 194, 196, 170, 186, 175, 186, 180, 192, 169, 179, 183, 175, 183, 173, 190, 191, 180, 174, 185, 184, 186, 173, 188, 192, 176, 181, 197, 169, 174, 171, 178, 175, 174, 188, 181, 180, 175, 193, 186, 184, 175, 180, 171, 188, 180, 178, 171, 192, 194, 180, 183, 175, 180, 183, 185, 176, 185, 170, 185, 186, 183, 190, 178, 183, 179, 174, 179, 182, 183, 183, 187, 181, 164, 178, 190, 183, 191, 172, 188, 190, 183, 180, 186, 186, 183, 178, 170, 179, 175, 193, 183, 183, 175, 186, 178, 182, 183, 184, 170, 183, 182, 193, 188, 184, 187, 182, 178, 178, 183, 183, 183, 188, 194, 182, 174, 
185, 175, 185, 193, 182, 187, 180, 175, 182, 187, 168, 173, 178, 191, 168, 180, 172, 178, 178, 178, 176, 183, 190, 187, 183, 185, 193, 178, 188, 170, 185, 187, 175, 175, 184, 176, 183, 185, 187, 174, 175, 190, 173, 187, 186, 178, 189, 178, 182, 178, 182, 191, 197, 176, 168, 180, 173, 183, 177, 184, 180, 186, 191, 180, 194, 182, 180, 182, 177, 178, 187, 184, 190, 185, 175, 175, 178, 184, 188, 184, 180, 187, 186, 193, 186, 195, 184, 191, 183, 168, 178, 184, 170, 187, 180, 187, 190, 173, 181, 185, 183, 188, 189, 181, 184, 178, 187, 187, 184, 173, 186, 168, 184, 181, 175, 185, 175, 208, 191, 176, 178, 192, 174, 181, 192, 176, 193, 185, 182, 179, 185, 178, 183, 180, 188, 180, 183, 184, 191, 171, 183, 178, 178, 177, 183, 178, 174, 175, 178, 185, 175, 172, 185, 185, 188, 180, 195, 180, 194, 180, 170, 183, 188, 175, 194, 180, 173, 175, 179, 184, 183, 185, 187, 182, 189, 190, 174, 170, 179, 174, 191, 179, 173, 172, 188, 188, 198, 172, 175, 185, 185, 173, 183, 188, 194, 183, 176, 193, 175, 187, 182, 185, 176, 178, 191, 185, 178, 185, 191, 185, 181, 178, 180, 182, 183, 177, 185, 175, 175, 185, 185, 183, 191, 184, 187, 180, 175, 180, 179, 167, 180, 180, 182, 188, 179, 178, 192, 185, 178, 183, 180, 182, 178, 188, 179, 185, 186, 186, 174, 179, 180, 179, 170, 186, 186, 189, 191, 182, 196, 185, 175, 178, 188, 180, 170, 188, 191, 179, 175, 185, 196, 181, 189, 185, 186, 178, 185, 185, 183, 193, 185, 178, 177, 174, 188, 193, 183, 183, 180, 186, 180, 185, 183, 168, 187, 191, 172, 178, 185, 185, 193, 175, 191, 165, 179, 169, 166, 180, 178, 188, 173, 179, 192, 178, 170, 176, 180, 180, 191, 185, 186, 180, 172, 170, 185, 187, 184, 190, 180, 180, 183, 174, 177, 174, 171, 186, 183, 178, 185, 185, 180, 182, 183, 184, 187, 169, 180, 175, 178, 178, 170, 193, 183, 176, 185, 188, 182, 177, 183, 191, 185, 183, 189, 177, 183, 194, 176, 171, 179, 186, 188, 165, 181, 186, 180, 183, 185, 184, 185, 180, 174, 173, 194, 182, 176, 185, 177, 176, 183, 187, 183, 184, 183, 190, 190, 181, 182, 181, 171, 183, 177, 178, 180, 180, 172, 176, 178, 179, 194, 191, 175, 188, 186, 183, 184, 186, 188, 193, 173, 181, 180, 178, 173, 183, 168, 182, 190, 188, 180, 177, 182, 180, 195, 196, 176, 182, 196, 174, 173, 182, 176, 175, 186, 180, 173, 180, 190, 176, 182, 182, 180, 162, 174, 192, 180, 183, 170, 185, 180, 187, 181, 188, 172, 172, 179, 182, 180, 181, 175, 169, 199, 180, 173, 181, 192, 177, 178, 173, 185, 179, 196, 176, 185, 187, 184, 179, 183, 188, 192, 188, 190, 185, 180, 180, 187, 182, 194, 177, 180, 183, 168, 186, 173, 197, 182, 179, 183, 194, 176, 181, 165, 186, 180, 186, 178, 187, 171, 180, 178, 177, 183, 179, 192, 189, 180, 190, 180, 168, 183, 185, 186, 183, 178, 185, 185, 180, 183, 182, 185, 183, 178, 184, 183, 181, 168, 185, 190, 165, 188, 185, 177, 192, 181, 182, 185, 190, 180, 185, 180, 185, 182, 185, 188, 182, 183, 191, 175, 172, 183, 193, 178, 183, 186, 186, 176, 187, 181, 179, 183, 179, 179, 186, 178, 183, 184, 176, 181, 185, 178, 178, 180, 188, 190, 182, 197, 172, 189, 178, 186, 192, 186, 180, 184, 185, 186, 186, 178, 190, 202, 183, 174, 166, 176, 178, 186, 189, 180, 176, 168, 175, 174, 196, 185, 190, 182, 188, 178, 173, 190, 178, 180, 180, 188, 174, 180, 188, 192, 180, 188, 176, 193, 180, 183, 187, 184, 170, 190, 173, 183, 175, 187, 182, 185, 178, 188, 170, 183, 177, 190, 173, 179, 169, 183, 191, 180, 183, 195, 178, 182, 185, 174, 173, 183, 193, 189, 171, 189, 187, 186, 179, 180, 181, 174, 183, 188, 178, 177, 183, 190, 180, 180, 175, 178, 183, 193, 170, 171, 192, 196, 179, 172, 180, 170, 186, 188, 176, 184, 192, 181, 191, 183, 
189, 188, 180, 186, 177, 186, 172, 183, 185, 178, 173, 187, 180, 177, 173, 172, 185, 177, 172, 175, 187, 172, 188, 174, 177, 173, 176, 189, 167, 175, 169, 174, 178, 172, 176, 189, 180, 182, 177, 170, 173, 187, 178, 181, 187, 190, 186, 187, 187, 169, 185, 196, 188, 180, 186, 195, 181, 186, 180, 170, 183, 180, 193, 181, 189, 189, 184, 184, 179, 176, 172, 172, 180, 177, 176, 178, 190, 183, 183, 177, 188, 190, 186, 196, 186, 187, 192, 186, 180, 179, 175, 186, 176, 185, 185, 185, 181, 184, 180, 195, 183, 179, 186, 188, 188, 183, 188, 183, 175, 180, 175, 181, 181, 193, 175, 185, 175, 180, 177, 178, 172, 179, 174, 180, 176, 170, 192, 176, 177, 185, 180, 189, 188, 188, 183, 179, 189, 187, 179, 181, 180, 183, 201, 178, 180, 184, 175, 176, 198, 190, 179, 181, 177, 178, 185, 187, 185, 180, 171, 188, 177, 176, 184, 185, 191, 192, 175, 185, 172, 183, 172, 173, 182, 180, 189, 185, 183, 185, 192, 188, 183, 184, 173, 177, 176, 174, 178, 183, 192, 174, 191, 173, 173, 180, 174, 174, 176, 188, 188, 188, 173, 185, 180, 191, 193, 185, 186, 182, 177, 178, 178, 178, 181, 188, 175, 177, 186, 180, 178, 170, 186, 191, 174, 177, 183, 182, 183, 185, 180, 185, 175, 172, 184, 177, 187, 181, 167, 182, 182, 190, 187, 185, 183, 178, 187, 178, 188, 196, 175, 183, 175, 175, 173, 180, 180, 185, 191, 179, 176, 182, 180, 175, 180, 180, 180, 181, 179, 182, 178, 183, 173, 180, 180, 180, 190, 185, 197, 174, 187, 171, 186, 183, 183, 176, 183, 186, 180, 177, 173, 185, 177, 175, 180, 193, 179, 178, 180, 177, 183, 193, 192, 180, 175, 195, 184, 180, 181, 183, 189, 176, 190, 187, 180, 188, 185, 183, 178, 180, 188, 179, 188, 181, 198, 191, 193, 180, 180, 173, 186, 193, 173, 180, 170, 188, 180, 177, 186, 176, 178, 175, 190, 188, 180, 173, 188, 179, 185, 187, 173, 180, 171, 173, 176, 174, 183, 178, 179, 186, 184, 175, 184, 174, 188, 185, 184, 186, 191, 185, 178, 182, 186, 185, 185, 178, 193, 183, 182, 185, 185, 196, 180, 178, 191, 187, 177, 170, 190, 181, 188, 194, 180, 175, 181, 188, 178, 192, 178, 185, 190, 183, 172, 181, 192, 190, 182, 185, 188, 181, 185, 168, 180, 176, 180, 174, 178, 179, 187, 183, 180, 184, 173, 183, 177, 172, 171, 186, 190, 187, 191, 187, 189, 177, 182, 187, 178, 184, 173, 188, 184, 175, 170, 186, 184, 189, 195, 182, 175, 175, 186, 174, 178, 174, 196, 192, 176, 182, 182, 194, 175, 175, 182, 184, 177, 178, 177, 182, 175, 185, 170, 185, 173, 188, 185, 173, 179, 177, 183, 178, 182, 185, 197, 191, 173, 171, 183, 181, 180, 181, 178, 189, 180, 172, 184, 188, 173, 183, 174, 190, 187, 182, 178, 174, 165, 187, 176, 176, 183, 188, 175, 183, 182, 186, 180, 183, 192, 185, 168, 184, 174, 176, 184, 186, 193, 185, 180, 174, 191, 190, 189, 190, 183, 177, 183, 183, 186, 180, 185, 185, 170, 176, 186, 175, 191, 173, 173, 176, 185, 176, 175, 183, 175, 189, 184, 181, 183, 175, 184, 190, 179, 178, 192, 184, 173, 180, 188, 188, 190, 179, 177, 190, 182, 203, 190, 183, 180, 189, 194, 180, 184, 185, 180, 187, 194, 173, 187, 173, 180, 185, 190, 179, 178, 194, 186, 180, 186, 176, 195, 182, 170, 163, 175, 178, 176, 181, 178, 178, 180, 185, 179, 192, 190, 177, 185, 175, 178, 176, 175, 172, 187, 190, 167, 193, 183, 173, 183, 175, 196, 180, 172, 187, 182, 180, 175, 171, 190, 180, 184, 177, 191, 186, 183, 185, 181, 192, 176, 166, 187, 180, 174, 181, 194, 176, 184, 187, 183, 183, 184, 180, 191, 178, 172, 174, 185, 178, 185, 172, 181, 183, 170, 175, 189, 191, 180, 176, 177, 184, 173, 178, 175, 194, 196, 184, 180, 181, 188, 180, 187, 175, 176, 179, 189, 177, 181, 177, 179, 193, 196, 187, 183, 179, 183, 182, 173, 188, 188, 175, 191, 185, 186, 187, 174, 
188, 184, 182, 193, 175, 191, 185, 183, 185, 192, 177, 181, 182, 189, 184, 183, 169, 173, 197, 182, 178, 181, 185, 185, 173, 175, 181, 178, 179, 170, 180, 182, 169, 185, 185, 173, 174, 186, 178, 190, 178, 194, 180, 180, 189, 172, 171, 173, 186, 178, 178, 190, 175, 178, 179, 185, 191, 172, 179, 178, 172, 184, 183, 178, 178, 178, 186, 178, 185, 188, 186, 187, 188, 181, 193, 184, 187, 181, 181, 174, 175, 178, 178, 193, 173, 188, 176, 178, 173, 178, 185, 178, 178, 175, 180, 169, 192, 181, 176, 193, 185, 176, 185, 176, 180, 179, 187, 184, 178, 170, 175, 178, 178, 185, 180, 175, 185, 176, 175, 179, 177, 175, 180, 185, 191, 181, 171, 188, 188, 196, 187, 185, 192, 169, 190, 196, 179, 182, 180, 192, 186, 180, 191, 179, 169, 167, 183, 175, 180, 193, 191, 187, 190, 180, 191, 188, 181, 177, 173, 170, 184, 185, 175, 194, 180, 174, 180, 190, 191, 170, 182, 185, 174, 191, 181, 180, 188, 183, 183, 183, 167, 191, 170, 191, 191, 180, 181, 165, 176, 191, 191, 170, 190, 185, 183, 186, 176, 181, 188, 170, 178, 178, 188, 188, 183, 175, 175, 187, 191, 173, 184, 183, 191, 194, 184, 176, 180, 175, 181, 182, 178, 170, 183, 177, 191, 191, 176, 177, 178, 181, 183, 173, 188, 173, 180, 191, 175, 185, 175, 188, 193, 176, 186, 178, 185, 183, 194, 183, 184, 188, 188, 180, 188, 182, 188, 185, 168, 176, 196, 178, 185, 185, 172, 183, 188, 181, 184, 188, 183, 171, 184, 183, 193, 188, 168, 183, 181, 178, 182, 178, 180, 188, 191, 168, 185, 190, 175, 181, 182, 180, 177, 181, 163, 183, 182, 180, 178, 178, 178, 171, 182, 180, 193, 180, 178, 177, 175, 183, 193, 188, 180, 185, 190, 193, 190, 166, 182, 178, 167, 176, 173, 175, 185, 177, 178, 186, 173, 180, 183, 191, 187, 183, 180, 183, 181, 177, 177, 190, 180, 185, 186, 187, 196, 189, 173, 185, 183, 175, 188, 193, 180, 185, 170, 178, 182, 188, 175, 172, 183, 175, 181, 178, 182, 183, 175, 187, 170, 188, 178, 182, 188, 167, 188, 185, 187, 184, 183, 180, 191, 183, 175, 182, 191, 178, 172, 185, 180, 190, 191, 180, 175, 173, 178, 185, 188, 179, 177, 185, 193, 180, 183, 185, 188, 190, 187, 179, 189, 178, 182, 189, 184, 186, 178, 192, 184, 175, 187, 185, 180, 177, 185, 183, 185, 175, 187, 188, 184, 183, 179, 182, 193, 189, 183, 180, 194, 187, 165, 177, 180, 190, 174, 178, 178, 185, 185, 194, 185, 191, 193, 173, 185, 180, 178, 185, 188, 186, 180, 180, 180, 180, 190, 179, 179, 192, 197, 183, 168, 180, 180, 188, 179, 178, 180, 183, 178, 185, 177, 185, 180, 179, 190, 180, 183, 192, 189, 178, 178, 174, 170, 176, 170, 195, 178, 187, 178, 183, 167, 178, 178, 170, 180, 180, 180, 185, 175, 183, 180, 173, 185, 180, 175, 185, 178, 175, 184, 184, 179, 174, 189, 176, 184, 180, 182, 188, 180, 173, 186, 179, 188, 184, 180, 178, 178, 181, 185, 182, 180, 180, 188, 172, 183, 191, 181, 178, 191, 191, 178, 173, 188, 179, 180, 193, 179, 185, 178, 179, 183, 168, 183, 184, 180, 203, 173, 180, 170, 193, 175, 175, 178, 191, 174, 188, 167, 173, 183, 184, 183, 187, 177, 176, 184, 185, 184, 180, 181, 193, 175, 177, 184, 185, 189, 179, 182, 173, 165, 184, 191, 175, 185, 187, 170, 184, 178, 174, 179, 178, 181, 194, 179, 185, 180, 178, 184, 178, 188, 176, 191, 174, 180, 180, 182, 180, 192, 188, 172, 168, 177, 165, 194, 174, 181, 177, 174, 180, 177, 191, 179, 194, 185, 176, 194, 187, 187, 173, 188, 178, 185, 180, 177, 176, 185, 182, 182, 185, 169, 179, 179, 182, 176, 172, 171, 190, 175, 178, 190, 180, 191, 179, 187, 189, 180, 188, 178, 187, 181, 189, 185, 178, 190, 189, 175, 185, 176, 180, 185, 186, 188, 198, 182, 192, 182, 190, 182, 189, 191, 183, 192, 181, 180, 178, 181, 191, 182, 178, 173, 192, 188, 180, 175, 187, 187, 
182, 185, 182, 185, 180, 189, 177, 182, 172, 184, 174, 184, 168, 175, 180, 183, 175, 174, 185, 185, 195, 178, 180, 180, 182, 179, 172, 187, 181, 165, 177, 198, 180, 179, 174, 180, 183, 191, 196, 182, 180, 185, 181, 178, 176, 179, 191, 188, 176, 175, 178, 190, 192, 185, 180, 174, 175, 175, 178, 181, 193, 173, 185, 170, 190, 189, 182, 180, 180, 175, 180, 179, 185, 182, 179, 182, 185, 174, 177, 173, 186, 182, 191, 171, 183, 192, 185, 184, 180, 183, 178, 170, 183, 185, 190, 179, 191, 173, 182, 191, 171, 186, 173, 175, 183, 187, 180, 178, 197, 187, 171, 179, 186, 178, 180, 175, 177, 176, 183, 183, 173, 180, 170, 178, 170, 176, 189, 185, 180, 180, 178, 176, 175, 180, 183, 170, 178, 180, 184, 181, 178, 180, 177, 178, 178, 176, 179, 188, 184, 185, 179, 190, 170, 190, 187, 183, 185, 185, 187, 177, 185, 175, 179, 181, 180, 176, 162, 179, 193, 185, 176, 178, 183, 177, 182, 177, 182, 174, 175, 180, 184, 178, 188, 180, 183, 185, 182, 177, 176, 183, 187, 197, 187, 180, 182, 182, 182, 186, 185, 172, 174, 181, 186, 189, 175, 178, 193, 180, 174, 182, 180, 193, 180, 181, 173, 188, 187, 174, 171, 180, 178, 180, 179, 191, 183, 185, 178, 183, 178, 191, 181, 184, 172, 187, 180, 180, 190, 184, 188, 188, 188, 183, 179, 182, 176, 178, 173, 185, 185, 189, 178, 188, 178, 176, 181, 167, 180, 185, 180, 183, 189, 194, 185, 190, 185, 179, 178, 173, 177, 180, 178, 175, 183, 170, 179, 186, 188, 186, 175, 179, 177, 186, 176, 173, 183, 178, 178, 186, 180, 185, 173, 171, 172, 188, 183, 182, 187, 176, 173, 185, 180, 183, 170, 180, 193, 175, 174, 177, 188, 180, 182, 186, 187, 186, 180, 184, 190, 165, 193, 189, 180, 175, 188, 180, 169, 183, 173, 183, 179, 189, 182, 186, 179, 182, 179, 181, 182, 181, 175, 190, 184, 183, 183, 186, 186, 173, 180, 188, 175, 181, 179, 183, 186, 173, 168, 184, 183, 178, 193, 178, 177, 189, 178, 176, 182, 174, 180, 179, 180, 183, 173, 183, 185, 185, 188, 188, 173, 160, 174, 182, 178, 183, 178, 183, 193, 188, 175, 196, 175, 185, 185, 183, 185, 183, 180, 165, 175, 183, 183, 183, 178, 180, 178, 173, 179, 186, 187, 185, 175, 173, 175, 191, 188, 195, 191, 185, 187, 178, 176, 184, 185, 177, 175, 170, 175, 181, 194, 186, 175, 174, 176, 188, 174, 173, 191, 182, 176, 173, 190, 181, 173, 188, 170, 194, 178, 177, 181, 186, 180, 191, 179, 178, 180, 178, 175, 181, 170, 182, 184, 182, 180, 175, 178, 168, 175, 188, 183, 184, 188, 185, 185, 190, 172, 183, 168, 180, 185, 173, 184, 172, 164, 185, 183, 191, 181, 175, 191, 178, 180, 180, 180, 175, 182, 185, 191, 175, 185, 183, 176, 173, 182, 183, 179, 192, 183, 182, 199, 188, 185, 196, 185, 178, 183, 188, 187, 183, 184, 184, 185, 188, 177, 180, 194, 187, 175, 191, 186, 178, 178, 191, 185, 180, 173, 179, 170, 183, 196, 180, 193, 188, 178, 173, 178, 179, 181, 178, 179, 176, 181, 194, 177, 184, 184, 179, 178, 180, 185, 178, 188, 174, 187, 177, 179, 170, 181, 186, 176, 180, 177, 190, 185, 180, 179, 189, 185, 175, 182, 184, 180, 182, 180, 187, 184, 187, 177, 194, 186, 176, 179, 184, 170, 185, 175, 177, 178, 182, 174, 180, 188, 175, 189, 180, 188, 183, 191, 172, 183, 189, 179, 188, 171, 180, 193, 183, 176, 182, 170, 183, 182, 178, 183, 179, 179, 178, 178, 188, 186, 175, 193, 178, 180, 176, 180, 186, 170, 169, 180, 179, 178, 180, 186, 178, 180, 178, 185, 178, 188, 193, 180, 178, 189, 180, 175, 192, 191, 183, 185, 180, 182, 183, 189, 176, 178, 183, 188, 170, 182, 183, 171, 171, 180, 178, 178, 176, 174, 187, 180, 187, 175, 188, 184, 194, 175, 173, 193, 176, 174, 197, 191, 173, 177, 175, 182, 181, 179, 189, 176, 172, 175, 186, 170, 178, 181, 180, 185, 178, 185, 185, 192, 187, 185, 
185, 180, 182, 185, 180, 177, 190, 179, 190, 192, 183, 179, 183, 193, 193, 187, 178, 183, 168, 182, 186, 177, 189, 175, 183, 172, 168, 180, 193, 193, 193, 180, 181, 193, 175, 171, 183, 178, 183, 184, 178, 180, 180, 180, 180, 178, 177, 189, 175, 180, 183, 186, 178, 191, 184, 190, 181, 177, 178, 172, 189, 193, 187, 193, 181, 185, 193, 183, 192, 175, 171, 190, 169, 181, 188, 187, 188, 183, 182, 179, 178, 170, 190, 199, 180, 190, 186, 183, 190, 181, 180, 175, 177, 190, 178, 181, 182, 178, 191, 184, 181, 187, 174, 178, 183, 184, 180, 185, 185, 199, 173, 180, 180, 172, 183, 175, 175, 181, 184, 182, 182, 184, 174, 180, 178, 186, 187, 180, 183, 185, 183, 193, 185, 179, 197, 185, 171, 188, 183, 187, 188, 175, 186, 188, 188, 165, 184, 180, 191, 194, 182, 169, 191, 176, 183, 173, 182, 184, 191, 173, 173, 180, 171, 174, 170, 191, 186, 190, 188, 180, 186, 182, 173, 180, 175, 190, 183, 180, 195, 175, 182, 186, 172, 189, 170, 182, 185, 182, 170, 192, 175, 183, 187, 190, 175, 178, 191, 170, 180, 183, 174, 186, 186, 185, 180, 180, 175, 182, 193, 177, 176, 178, 180, 170, 190, 183, 178, 185, 177, 180, 180, 190, 185, 193, 180, 195, 182, 187, 189, 176, 179, 178, 180, 194, 194, 183, 171, 185, 189, 185, 183, 190, 181, 186, 175, 185, 179, 178, 181, 182, 188, 176, 182, 175, 188, 188, 178, 196, 173, 180, 190, 166, 176, 180, 185, 188, 174, 185, 189, 188, 176, 181, 190, 175, 180, 178, 188, 178, 178, 181, 189, 185, 178, 175, 183, 188, 186, 173, 191, 185, 195, 184, 175, 198, 185, 185, 170, 180, 185, 175, 185, 179, 186, 183, 176, 183, 180, 177, 187, 171, 183, 183, 185, 170, 181, 188, 177, 181, 183, 191, 195, 178, 194, 182, 174, 187, 182, 183, 193, 182, 185, 183, 176, 177, 178, 177, 179, 186, 185, 180, 184, 178, 180, 178, 185, 183, 185, 178, 188, 178, 187, 187, 178, 170, 180, 181, 192, 173, 183, 183, 180, 185, 183, 175, 183, 187, 171, 182, 185, 191, 179, 185, 183, 180, 190, 176, 175, 175, 182, 179, 175, 192, 179, 178, 182, 176, 180, 187, 185, 183, 180, 186, 171, 189, 191, 184, 186, 189, 180, 180, 192, 188, 176, 190, 183, 181, 178, 183, 178, 180, 183, 184, 180, 177, 193, 172, 191, 180, 184, 190, 178, 178, 182, 193, 184, 194, 180, 182, 186, 183, 166, 177, 190, 185, 187, 190, 178, 196, 190, 182, 183, 183, 173, 185, 184, 195, 181, 183, 183, 191, 186, 182, 191, 175, 172, 190, 185, 183, 187, 185, 179, 186, 184, 167, 184, 174, 184, 179, 185, 185, 180, 190, 191, 185, 185, 180, 191, 188, 190, 186, 173, 188, 193, 188, 179, 180, 175, 186, 187, 186, 183, 175, 181, 179, 190, 169, 184, 196, 183, 190, 189, 194, 186, 170, 175, 180, 181, 187, 184, 187, 166, 185, 183, 177, 184, 175, 178, 186, 186, 183, 187, 173, 180, 172, 185, 182, 184, 188, 175, 175, 182, 175, 188, 185, 188, 183, 182, 180, 194, 186, 187, 173, 190, 182, 185, 187, 180, 177, 180, 196, 177, 191, 190, 193, 182, 169, 190, 175, 182, 193, 170, 174, 174, 170, 181, 181, 191, 180, 183, 171, 180, 180, 191, 178, 178, 188, 180, 175, 169, 191, 180, 188, 173, 188, 191, 189, 188, 185, 196, 188, 188, 188, 176, 191, 171, 180, 183, 180, 196, 183, 180, 170, 182, 181, 193, 172, 175, 194, 178, 189, 184, 185, 185, 177, 163, 190, 175, 201, 191, 184, 193, 175, 183, 183, 178, 191, 193, 194, 178, 181, 180, 183, 175, 182, 183, 178, 178, 178, 181, 173, 186, 184, 193, 175, 175, 185, 179, 175, 182, 178, 175, 187, 173, 180, 180, 185, 178, 176, 179, 189, 183, 184, 173, 178, 178, 176, 193, 183, 182, 170, 185, 185, 198, 180, 173, 182, 187, 188, 175, 168, 185, 182, 174, 190, 179, 181, 183, 185, 180, 185, 178, 188, 182, 179, 175, 189, 179, 179, 181, 193, 188, 186, 188, 183, 183, 178, 182, 180, 177, 179, 181, 
177, 181, 175, 175, 185, 191, 188, 181, 186, 194, 191, 179, 188, 193, 185, 178, 185, 191, 180, 164, 186, 190, 174, 181, 173, 191, 182, 178, 178, 173, 178, 180, 177, 193, 168, 184, 185, 185, 175, 191, 180, 184, 190, 188, 185, 188, 170, 180, 177, 188, 196, 168, 183, 191, 191, 185, 188, 174, 170, 178, 180, 193, 170, 182, 176, 173, 170, 183, 180, 183, 173, 183, 185, 191, 179, 180, 173, 183, 173, 181, 185, 192, 191, 198, 185, 183, 178, 191, 185, 174, 178, 180, 191, 181, 191, 178, 182, 178, 175, 174, 175, 190, 175, 183, 175, 193, 173, 184, 178, 183, 191, 178, 174, 189, 174, 193, 180, 178, 189, 175, 185, 180, 185, 177, 173, 186, 173, 178, 175, 175, 183, 188, 188, 193, 188, 170, 183, 175, 175, 183, 180, 185, 185, 185, 173, 180, 180, 178, 182, 183, 185, 180, 175, 178, 193, 183, 185, 183, 178, 180, 187, 178, 180, 178, 191, 188, 185, 183, 177, 170, 188, 173, 182, 186, 185, 185, 186, 185, 184, 182, 188, 192, 183, 185, 173, 196, 182, 176, 181, 185, 188, 185, 183, 184, 188, 173, 186, 178, 188, 179, 191, 191, 188, 183, 176, 186, 175, 180, 183, 180, 185, 185, 187, 183, 180, 170, 181, 182, 181, 185, 168, 184, 191, 184, 183, 194, 190, 189, 181, 177, 190, 179, 186, 178, 190, 175, 192, 191, 183, 191, 184, 174, 192, 187, 178, 176, 175, 191, 179, 188, 178, 171, 180, 182, 173, 177, 192, 183, 175, 182, 178, 188, 192, 183, 186, 188, 180, 191, 180, 173, 191, 185, 181, 175, 180, 178, 175, 184, 179, 171, 180, 182, 174, 190, 170, 179, 175, 178, 177, 178, 171, 188, 173, 179, 182, 183, 185, 165, 182, 194, 181, 176, 180, 172, 187, 178, 187, 180, 182, 174, 196, 196, 175, 189, 175, 183, 179, 179]
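# Field positions for the same players ('GK', 'D', 'M', 'A' -- likely goalkeeper, defender, midfielder, attacker),
# intended to correspond element-wise to the heights list above.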
positions = ['GK', 'M', 'A', 'D', 'M', 'D', 'M', 'M', 'M', 'A', 'M', 'M', 'A', 'A', 'A', 'M', 'D', 'A', 'D', 'M', 'GK', 'D', 'D', 'M', 'M', 'M', 'M', 'D', 'M', 'GK', 'D', 'GK', 'D', 'D', 'M', 'A', 'M', 'D', 'M', 'GK', 'M', 'GK', 'A', 'D', 'GK', 'A', 'GK', 'GK', 'GK', 'GK', 'A', 'D', 'A', 'D', 'D', 'M', 'D', 'M', 'D', 'D', 'GK', 'GK', 'D', 'M', 'M', 'GK', 'M', 'D', 'M', 'M', 'D', 'D', 'M', 'M', 'D', 'A', 'A', 'M', 'M', 'M', 'A', 'D', 'D', 'A', 'A', 'M', 'M', 'M', 'D', 'D', 'A', 'A', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'D', 'M', 'A', 'M', 'M', 'GK', 'M', 'D', 'M', 'M', 'D', 'M', 'M', 'A', 'GK', 'D', 'M', 'GK', 'M', 'M', 'M', 'M', 'D', 'D', 'M', 'D', 'M', 'D', 'M', 'M', 'A', 'M', 'GK', 'A', 'M', 'D', 'M', 'D', 'GK', 'D', 'D', 'M', 'A', 'GK', 'M', 'D', 'A', 'D', 'A', 'A', 'M', 'D', 'M', 'A', 'GK', 'D', 'M', 'GK', 'A', 'D', 'D', 'D', 'GK', 'GK', 'M', 'D', 'GK', 'D', 'M', 'GK', 'A', 'D', 'GK', 'GK', 'D', 'M', 'GK', 'D', 'D', 'D', 'M', 'D', 'M', 'D', 'D', 'A', 'D', 'D', 'D', 'M', 'M', 'A', 'D', 'M', 'M', 'D', 'M', 'A', 'A', 'D', 'A', 'GK', 'M', 'A', 'A', 'D', 'D', 'A', 'D', 'GK', 'D', 'M', 'D', 'D', 'M', 'M', 'GK', 'D', 'M', 'GK', 'GK', 'D', 'M', 'D', 'D', 'M', 'A', 'D', 'D', 'M', 'A', 'A', 'A', 'A', 'A', 'M', 'D', 'D', 'A', 'M', 'GK', 'M', 'GK', 'A', 'A', 'GK', 'M', 'D', 'M', 'D', 'D', 'M', 'M', 'A', 'A', 'D', 'D', 'D', 'M', 'M', 'GK', 'D', 'M', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'D', 'M', 'A', 'A', 'D', 'D', 'M', 'GK', 'A', 'D', 'D', 'D', 'GK', 'D', 'M', 'D', 'A', 'A', 'GK', 'A', 'D', 'M', 'M', 'GK', 'A', 'A', 'M', 'D', 'A', 'M', 'M', 'M', 'D', 'D', 'D', 'M', 'D', 'A', 'M', 'M', 'M', 'A', 'M', 'M', 'D', 'M', 'D', 'M', 'M', 'A', 'D', 'D', 'M', 'A', 'D', 'D', 'M', 'M', 'M', 'D', 'M', 'D', 'A', 'D', 'D', 'M', 'D', 'A', 'D', 'D', 'GK', 'M', 'M', 'M', 'GK', 'M', 'A', 'D', 'D', 'M', 'A', 'GK', 'M', 'D', 'A', 'M', 'A', 'A', 'A', 'M', 'GK', 'A', 'A', 'M', 'A', 'D', 'D', 'D', 'A', 'GK', 'D', 'D', 'D', 'D', 'GK', 'A', 'GK', 'D', 'D', 'M', 'GK', 'D', 'D', 'D', 'A', 'D', 'D', 'GK', 'D', 'D', 'D', 'GK', 'D', 'GK', 'A', 'M', 'A', 'M', 'A', 'D', 'D', 'D', 'GK', 'GK', 'GK', 'M', 'A', 'M', 'D', 'M', 'A', 'GK', 'M', 'D', 'M', 'M', 'D', 'A', 'GK', 'M', 'A', 'GK', 'GK', 'M', 'A', 'A', 'M', 'GK', 'GK', 'D', 'M', 'A', 'D', 'A', 'D', 'D', 'A', 'D', 'M', 'D', 'D', 'M', 'D', 'A', 'GK', 'D', 'D', 'GK', 'A', 'D', 'D', 'GK', 'D', 'A', 'M', 'A', 'A', 'GK', 'D', 'A', 'D', 'A', 'D', 'GK', 'D', 'D', 'A', 'A', 'M', 'A', 'GK', 'M', 'D', 'A', 'D', 'M', 'M', 'D', 'M', 'GK', 'D', 'M', 'A', 'A', 'M', 'M', 'M', 'GK', 'GK', 'D', 'A', 'M', 'GK', 'D', 'M', 'GK', 'M', 'M', 'GK', 'M', 'D', 'A', 'D', 'M', 'M', 'A', 'M', 'GK', 'A', 'GK', 'A', 'M', 'GK', 'GK', 'D', 'D', 'M', 'M', 'D', 'GK', 'A', 'M', 'GK', 'A', 'GK', 'D', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'GK', 'M', 'D', 'M', 'D', 'GK', 'M', 'A', 'GK', 'A', 'M', 'M', 'A', 'M', 'M', 'A', 'A', 'A', 'M', 'GK', 'D', 'D', 'M', 'D', 'GK', 'D', 'M', 'M', 'M', 'A', 'D', 'A', 'D', 'A', 'M', 'M', 'D', 'M', 'M', 'D', 'D', 'GK', 'M', 'A', 'GK', 'A', 'A', 'M', 'D', 'GK', 'D', 'M', 'M', 'GK', 'GK', 'D', 'D', 'M', 'D', 'M', 'M', 'M', 'M', 'GK', 'M', 'D', 'M', 'D', 'GK', 'A', 'M', 'D', 'M', 'A', 'A', 'D', 'D', 'D', 'M', 'GK', 'D', 'A', 'M', 'D', 'A', 'GK', 'M', 'D', 'M', 'D', 'A', 'A', 'M', 'A', 'D', 'D', 'M', 'A', 'M', 'M', 'A', 'D', 'GK', 'A', 'M', 'D', 'M', 'A', 'D', 'D', 'D', 'GK', 'D', 'M', 'GK', 'M', 'M', 'GK', 'M', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'A', 'M', 'D', 'D', 'GK', 'D', 'M', 'M', 'GK', 'GK', 'M', 'D', 'D', 'A', 'GK', 'D', 'D', 'D', 'GK', 'A', 'A', 'D', 'A', 'D', 'M', 'D', 'D', 'A', 'M', 
'GK', 'D', 'M', 'D', 'M', 'A', 'A', 'GK', 'M', 'D', 'A', 'D', 'D', 'M', 'A', 'A', 'D', 'M', 'M', 'D', 'A', 'D', 'M', 'A', 'M', 'D', 'D', 'D', 'A', 'GK', 'D', 'D', 'M', 'M', 'A', 'M', 'A', 'D', 'M', 'A', 'A', 'GK', 'A', 'D', 'A', 'M', 'A', 'D', 'D', 'D', 'GK', 'A', 'D', 'D', 'D', 'A', 'A', 'A', 'M', 'GK', 'GK', 'D', 'A', 'GK', 'D', 'A', 'M', 'M', 'D', 'GK', 'M', 'A', 'M', 'D', 'M', 'M', 'M', 'D', 'A', 'GK', 'GK', 'D', 'M', 'D', 'D', 'D', 'M', 'GK', 'M', 'D', 'D', 'D', 'A', 'A', 'GK', 'D', 'D', 'M', 'M', 'D', 'D', 'M', 'M', 'D', 'A', 'M', 'D', 'M', 'M', 'M', 'A', 'GK', 'D', 'D', 'D', 'A', 'M', 'M', 'A', 'M', 'M', 'D', 'M', 'D', 'M', 'A', 'D', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'D', 'M', 'M', 'M', 'D', 'D', 'A', 'D', 'A', 'A', 'D', 'D', 'M', 'M', 'A', 'A', 'GK', 'A', 'GK', 'M', 'M', 'GK', 'D', 'GK', 'A', 'GK', 'D', 'M', 'GK', 'M', 'D', 'D', 'D', 'GK', 'M', 'GK', 'D', 'D', 'D', 'D', 'GK', 'A', 'M', 'M', 'D', 'GK', 'GK', 'GK', 'D', 'D', 'M', 'D', 'D', 'GK', 'D', 'A', 'D', 'M', 'D', 'D', 'D', 'M', 'D', 'M', 'D', 'M', 'D', 'D', 'M', 'M', 'D', 'D', 'A', 'M', 'D', 'M', 'A', 'M', 'D', 'A', 'M', 'D', 'GK', 'D', 'D', 'A', 'D', 'M', 'D', 'GK', 'A', 'D', 'A', 'M', 'A', 'A', 'GK', 'D', 'M', 'D', 'A', 'D', 'A', 'M', 'M', 'D', 'D', 'D', 'A', 'GK', 'A', 'D', 'M', 'M', 'M', 'D', 'A', 'A', 'D', 'D', 'M', 'D', 'D', 'D', 'GK', 'D', 'M', 'D', 'D', 'A', 'D', 'M', 'M', 'M', 'M', 'A', 'M', 'M', 'D', 'A', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'GK', 'D', 'M', 'A', 'D', 'D', 'M', 'M', 'M', 'A', 'M', 'GK', 'A', 'A', 'GK', 'A', 'A', 'GK', 'M', 'D', 'M', 'D', 'A', 'D', 'D', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'A', 'A', 'A', 'M', 'A', 'D', 'D', 'M', 'A', 'GK', 'D', 'M', 'A', 'D', 'GK', 'D', 'M', 'M', 'A', 'D', 'M', 'D', 'D', 'D', 'GK', 'M', 'A', 'A', 'A', 'D', 'GK', 'M', 'GK', 'M', 'GK', 'GK', 'M', 'M', 'M', 'D', 'GK', 'D', 'A', 'A', 'A', 'A', 'A', 'D', 'M', 'D', 'D', 'M', 'D', 'A', 'A', 'M', 'D', 'GK', 'D', 'M', 'A', 'D', 'D', 'A', 'M', 'M', 'D', 'D', 'A', 'D', 'M', 'D', 'A', 'A', 'D', 'M', 'M', 'GK', 'D', 'A', 'A', 'A', 'D', 'D', 'GK', 'M', 'M', 'A', 'M', 'M', 'GK', 'D', 'D', 'D', 'A', 'GK', 'M', 'D', 'M', 'D', 'GK', 'M', 'A', 'M', 'D', 'A', 'M', 'GK', 'D', 'D', 'A', 'M', 'D', 'M', 'GK', 'M', 'M', 'GK', 'A', 'M', 'D', 'D', 'A', 'D', 'A', 'D', 'D', 'M', 'M', 'D', 'M', 'GK', 'D', 'M', 'M', 'D', 'GK', 'M', 'M', 'GK', 'D', 'D', 'M', 'M', 'D', 'D', 'A', 'M', 'A', 'M', 'A', 'D', 'D', 'D', 'A', 'D', 'GK', 'A', 'M', 'D', 'D', 'D', 'GK', 'M', 'A', 'D', 'GK', 'M', 'D', 'A', 'GK', 'GK', 'A', 'D', 'M', 'A', 'D', 'GK', 'D', 'D', 'A', 'D', 'D', 'A', 'M', 'M', 'GK', 'D', 'D', 'M', 'GK', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'A', 'D', 'M', 'A', 'M', 'M', 'M', 'A', 'D', 'D', 'D', 'M', 'D', 'A', 'D', 'A', 'D', 'D', 'D', 'D', 'D', 'M', 'D', 'GK', 'D', 'M', 'A', 'M', 'GK', 'M', 'M', 'M', 'D', 'M', 'M', 'M', 'M', 'A', 'D', 'M', 'A', 'GK', 'M', 'M', 'D', 'D', 'M', 'A', 'A', 'A', 'GK', 'M', 'D', 'M', 'M', 'D', 'GK', 'D', 'GK', 'D', 'M', 'M', 'A', 'D', 'GK', 'A', 'D', 'A', 'A', 'D', 'A', 'M', 'A', 'M', 'M', 'M', 'D', 'M', 'M', 'D', 'D', 'M', 'D', 'D', 'D', 'A', 'M', 'D', 'M', 'A', 'A', 'GK', 'GK', 'M', 'A', 'M', 'D', 'D', 'D', 'GK', 'A', 'GK', 'D', 'M', 'D', 'M', 'D', 'A', 'M', 'D', 'M', 'D', 'GK', 'M', 'D', 'D', 'M', 'D', 'GK', 'A', 'D', 'D', 'GK', 'GK', 'D', 'A', 'A', 'M', 'A', 'D', 'GK', 'A', 'M', 'GK', 'GK', 'D', 'M', 'D', 'M', 'D', 'M', 'M', 'M', 'M', 'A', 'D', 'A', 'D', 'M', 'M', 'M', 'A', 'M', 'GK', 'M', 'A', 'M', 'M', 'A', 'D', 'GK', 'M', 'M', 'D', 'D', 'M', 'M', 'M', 'D', 'D', 'M', 'A', 'M', 'D', 'GK', 'D', 'M', 'D', 'D', 'M', 'A', 
'GK', 'A', 'GK', 'GK', 'D', 'M', 'A', 'M', 'D', 'M', 'GK', 'D', 'M', 'D', 'A', 'D', 'D', 'D', 'GK', 'D', 'GK', 'M', 'D', 'A', 'A', 'M', 'M', 'M', 'A', 'GK', 'M', 'D', 'A', 'A', 'GK', 'A', 'M', 'M', 'D', 'D', 'D', 'D', 'A', 'D', 'GK', 'D', 'M', 'A', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'M', 'A', 'M', 'M', 'D', 'GK', 'M', 'A', 'GK', 'A', 'GK', 'M', 'M', 'M', 'M', 'A', 'A', 'GK', 'GK', 'A', 'M', 'M', 'A', 'M', 'D', 'A', 'M', 'M', 'M', 'GK', 'M', 'M', 'A', 'D', 'GK', 'D', 'GK', 'D', 'D', 'D', 'A', 'M', 'M', 'M', 'D', 'A', 'D', 'M', 'M', 'D', 'D', 'A', 'A', 'A', 'A', 'M', 'A', 'GK', 'M', 'D', 'M', 'M', 'A', 'D', 'M', 'M', 'GK', 'M', 'A', 'M', 'D', 'M', 'A', 'M', 'M', 'GK', 'D', 'D', 'GK', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'A', 'M', 'A', 'M', 'D', 'M', 'GK', 'D', 'A', 'M', 'A', 'M', 'GK', 'A', 'D', 'D', 'D', 'GK', 'D', 'A', 'A', 'M', 'M', 'D', 'M', 'A', 'M', 'M', 'M', 'M', 'D', 'A', 'D', 'A', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'GK', 'A', 'M', 'D', 'M', 'D', 'A', 'GK', 'D', 'M', 'A', 'A', 'A', 'A', 'M', 'D', 'GK', 'A', 'M', 'A', 'GK', 'D', 'D', 'D', 'D', 'A', 'D', 'M', 'D', 'D', 'A', 'M', 'GK', 'D', 'M', 'M', 'GK', 'A', 'M', 'D', 'M', 'M', 'M', 'A', 'A', 'A', 'D', 'M', 'A', 'D', 'M', 'A', 'D', 'A', 'GK', 'A', 'A', 'GK', 'GK', 'M', 'M', 'D', 'M', 'M', 'D', 'M', 'D', 'GK', 'D', 'M', 'A', 'D', 'M', 'GK', 'D', 'M', 'GK', 'D', 'GK', 'A', 'D', 'M', 'A', 'A', 'M', 'M', 'D', 'D', 'M', 'A', 'D', 'M', 'A', 'D', 'D', 'A', 'M', 'M', 'M', 'M', 'A', 'M', 'D', 'M', 'D', 'GK', 'GK', 'A', 'A', 'A', 'A', 'D', 'D', 'A', 'D', 'M', 'M', 'A', 'A', 'D', 'D', 'M', 'GK', 'A', 'D', 'A', 'GK', 'GK', 'A', 'D', 'M', 'A', 'D', 'M', 'M', 'A', 'D', 'M', 'M', 'D', 'D', 'M', 'D', 'GK', 'M', 'A', 'A', 'D', 'A', 'D', 'D', 'GK', 'D', 'D', 'GK', 'D', 'A', 'D', 'D', 'D', 'M', 'D', 'M', 'M', 'GK', 'A', 'D', 'GK', 'D', 'M', 'A', 'M', 'M', 'GK', 'M', 'GK', 'D', 'D', 'D', 'M', 'A', 'D', 'D', 'D', 'GK', 'M', 'A', 'D', 'M', 'GK', 'M', 'D', 'M', 'M', 'A', 'A', 'M', 'D', 'M', 'A', 'M', 'A', 'M', 'D', 'M', 'D', 'GK', 'M', 'A', 'D', 'A', 'A', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'M', 'M', 'M', 'M', 'M', 'A', 'D', 'A', 'D', 'M', 'A', 'M', 'M', 'M', 'D', 'M', 'D', 'M', 'M', 'M', 'A', 'D', 'M', 'M', 'M', 'M', 'D', 'D', 'GK', 'D', 'M', 'D', 'M', 'A', 'D', 'GK', 'D', 'A', 'A', 'A', 'M', 'M', 'M', 'M', 'M', 'GK', 'D', 'D', 'A', 'M', 'D', 'D', 'M', 'A', 'A', 'D', 'GK', 'GK', 'M', 'D', 'A', 'M', 'GK', 'GK', 'GK', 'D', 'M', 'M', 'A', 'D', 'D', 'M', 'M', 'D', 'A', 'M', 'D', 'M', 'A', 'GK', 'GK', 'D', 'GK', 'M', 'M', 'M', 'M', 'D', 'M', 'D', 'A', 'D', 'M', 'D', 'D', 'GK', 'A', 'A', 'M', 'D', 'D', 'A', 'M', 'M', 'D', 'A', 'M', 'M', 'M', 'D', 'A', 'M', 'GK', 'D', 'D', 'A', 'A', 'M', 'A', 'M', 'D', 'D', 'GK', 'M', 'D', 'M', 'M', 'D', 'D', 'D', 'D', 'D', 'A', 'M', 'M', 'M', 'D', 'M', 'GK', 'A', 'D', 'D', 'GK', 'M', 'M', 'A', 'A', 'M', 'M', 'A', 'D', 'A', 'D', 'M', 'GK', 'M', 'D', 'D', 'M', 'M', 'A', 'M', 'M', 'GK', 'A', 'A', 'GK', 'D', 'D', 'M', 'D', 'D', 'D', 'A', 'D', 'GK', 'M', 'A', 'D', 'D', 'GK', 'GK', 'GK', 'D', 'M', 'GK', 'M', 'D', 'M', 'M', 'A', 'GK', 'M', 'D', 'D', 'M', 'GK', 'A', 'GK', 'A', 'A', 'M', 'D', 'A', 'M', 'A', 'M', 'D', 'GK', 'D', 'M', 'A', 'A', 'M', 'M', 'D', 'GK', 'D', 'D', 'A', 'A', 'A', 'GK', 'D', 'M', 'D', 'GK', 'D', 'D', 'D', 'GK', 'M', 'M', 'D', 'D', 'D', 'A', 'A', 'D', 'A', 'A', 'D', 'D', 'M', 'GK', 'M', 'M', 'D', 'M', 'A', 'M', 'A', 'GK', 'D', 'D', 'M', 'M', 'A', 'GK', 'D', 'GK', 'D', 'D', 'M', 'A', 'M', 'M', 'M', 'A', 'A', 'D', 'M', 'M', 'M', 'M', 'A', 'D', 'D', 'M', 'M', 'M', 'GK', 'M', 'A', 'M', 'A', 'D', 'M', 
'D', 'D', 'A', 'D', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'D', 'M', 'M', 'M', 'GK', 'A', 'D', 'M', 'D', 'D', 'M', 'D', 'A', 'GK', 'D', 'D', 'A', 'D', 'D', 'GK', 'M', 'D', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'A', 'A', 'A', 'A', 'M', 'M', 'A', 'A', 'A', 'D', 'M', 'M', 'A', 'A', 'A', 'D', 'M', 'M', 'M', 'GK', 'M', 'M', 'M', 'M', 'A', 'M', 'D', 'D', 'D', 'D', 'A', 'M', 'M', 'M', 'A', 'M', 'D', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'A', 'M', 'M', 'M', 'D', 'M', 'A', 'D', 'D', 'D', 'D', 'A', 'D', 'A', 'A', 'D', 'A', 'GK', 'M', 'M', 'A', 'D', 'D', 'M', 'A', 'M', 'A', 'A', 'GK', 'A', 'D', 'D', 'M', 'A', 'M', 'D', 'A', 'GK', 'A', 'A', 'D', 'D', 'M', 'A', 'GK', 'A', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'D', 'M', 'GK', 'D', 'M', 'M', 'A', 'D', 'M', 'D', 'GK', 'A', 'D', 'D', 'A', 'M', 'D', 'D', 'M', 'M', 'A', 'M', 'D', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'D', 'GK', 'D', 'D', 'GK', 'D', 'D', 'A', 'A', 'D', 'A', 'A', 'D', 'M', 'D', 'D', 'D', 'A', 'A', 'GK', 'M', 'A', 'D', 'M', 'M', 'M', 'D', 'M', 'GK', 'A', 'GK', 'M', 'M', 'GK', 'D', 'D', 'M', 'GK', 'M', 'M', 'M', 'M', 'GK', 'D', 'GK', 'M', 'M', 'M', 'D', 'D', 'D', 'M', 'A', 'M', 'M', 'A', 'M', 'M', 'A', 'M', 'D', 'A', 'D', 'A', 'D', 'D', 'M', 'A', 'GK', 'A', 'M', 'D', 'M', 'D', 'A', 'M', 'D', 'M', 'M', 'M', 'M', 'GK', 'M', 'M', 'A', 'A', 'GK', 'M', 'D', 'M', 'A', 'M', 'M', 'D', 'D', 'M', 'GK', 'A', 'D', 'A', 'M', 'A', 'D', 'D', 'M', 'A', 'M', 'M', 'D', 'M', 'D', 'D', 'D', 'M', 'A', 'M', 'D', 'A', 'D', 'A', 'D', 'GK', 'M', 'A', 'D', 'M', 'M', 'M', 'GK', 'D', 'M', 'A', 'A', 'A', 'D', 'D', 'D', 'D', 'M', 'D', 'M', 'A', 'A', 'GK', 'D', 'D', 'GK', 'D', 'A', 'D', 'M', 'A', 'D', 'A', 'A', 'D', 'GK', 'A', 'D', 'D', 'A', 'A', 'M', 'A', 'M', 'A', 'M', 'D', 'D', 'D', 'M', 'A', 'GK', 'M', 'M', 'A', 'D', 'D', 'M', 'A', 'A', 'D', 'D', 'M', 'D', 'D', 'A', 'M', 'D', 'M', 'A', 'D', 'D', 'M', 'A', 'D', 'M', 'A', 'D', 'D', 'GK', 'D', 'M', 'D', 'GK', 'A', 'M', 'D', 'D', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'M', 'A', 'M', 'M', 'GK', 'M', 'M', 'M', 'A', 'A', 'A', 'D', 'M', 'GK', 'GK', 'A', 'M', 'D', 'GK', 'M', 'A', 'M', 'D', 'D', 'M', 'D', 'A', 'D', 'D', 'GK', 'D', 'M', 'A', 'A', 'D', 'A', 'M', 'D', 'A', 'GK', 'A', 'D', 'A', 'D', 'GK', 'GK', 'GK', 'M', 'D', 'D', 'M', 'GK', 'D', 'A', 'D', 'GK', 'D', 'M', 'M', 'D', 'M', 'M', 'M', 'A', 'D', 'M', 'GK', 'D', 'D', 'A', 'GK', 'M', 'M', 'M', 'D', 'GK', 'D', 'D', 'M', 'D', 'D', 'GK', 'A', 'GK', 'D', 'A', 'D', 'M', 'D', 'A', 'A', 'M', 'D', 'D', 'D', 'M', 'GK', 'M', 'D', 'D', 'GK', 'D', 'M', 'D', 'A', 'D', 'A', 'M', 'M', 'M', 'D', 'A', 'GK', 'M', 'M', 'D', 'D', 'A', 'A', 'GK', 'GK', 'D', 'A', 'M', 'GK', 'M', 'M', 'A', 'D', 'M', 'A', 'A', 'GK', 'M', 'A', 'A', 'D', 'M', 'M', 'D', 'A', 'D', 'A', 'A', 'D', 'M', 'D', 'A', 'D', 'D', 'M', 'M', 'M', 'M', 'M', 'A', 'D', 'D', 'M', 'D', 'D', 'GK', 'D', 'M', 'D', 'M', 'GK', 'D', 'M', 'A', 'M', 'M', 'M', 'M', 'D', 'D', 'D', 'A', 'GK', 'A', 'GK', 'M', 'D', 'GK', 'GK', 'A', 'M', 'A', 'D', 'D', 'D', 'M', 'M', 'D', 'D', 'A', 'M', 'A', 'M', 'M', 'GK', 'M', 'D', 'A', 'M', 'A', 'M', 'D', 'D', 'D', 'D', 'D', 'D', 'A', 'D', 'GK', 'M', 'M', 'GK', 'GK', 'D', 'D', 'M', 'A', 'D', 'D', 'D', 'GK', 'GK', 'M', 'M', 'M', 'GK', 'D', 'M', 'M', 'M', 'M', 'D', 'D', 'A', 'D', 'A', 'A', 'GK', 'M', 'D', 'D', 'M', 'M', 'M', 'D', 'A', 'M', 'GK', 'M', 'GK', 'GK', 'D', 'M', 'GK', 'D', 'M', 'M', 'D', 'M', 'D', 'A', 'D', 'D', 'D', 'A', 'M', 'A', 'D', 'D', 'A', 'D', 'D', 'M', 'M', 'D', 'A', 'D', 'A', 'D', 'A', 'A', 'M', 'A', 'D', 'M', 'M', 'M', 'GK', 'GK', 'M', 'M', 'D', 'A', 'D', 'D', 'M', 'A', 'A', 'M', 'D', 'D', 'D', 'D', 'GK', 'M', 
'M', 'D', 'D', 'D', 'D', 'M', 'D', 'D', 'D', 'M', 'D', 'M', 'D', 'A', 'D', 'D', 'A', 'A', 'D', 'D', 'M', 'D', 'GK', 'D', 'M', 'A', 'M', 'GK', 'D', 'D', 'M', 'A', 'M', 'A', 'M', 'A', 'A', 'A', 'M', 'D', 'M', 'D', 'M', 'D', 'A', 'M', 'D', 'M', 'A', 'M', 'M', 'D', 'A', 'A', 'A', 'D', 'A', 'M', 'D', 'M', 'A', 'M', 'D', 'A', 'M', 'A', 'GK', 'D', 'M', 'D', 'M', 'D', 'A', 'M', 'A', 'D', 'M', 'M', 'D', 'GK', 'A', 'M', 'M', 'M', 'M', 'D', 'D', 'M', 'A', 'M', 'M', 'D', 'M', 'M', 'D', 'GK', 'D', 'D', 'M', 'M', 'D', 'M', 'A', 'D', 'GK', 'A', 'M', 'D', 'A', 'A', 'A', 'A', 'GK', 'M', 'D', 'M', 'M', 'D', 'A', 'M', 'GK', 'D', 'M', 'A', 'M', 'GK', 'M', 'A', 'GK', 'A', 'D', 'A', 'M', 'M', 'D', 'M', 'D', 'M', 'D', 'A', 'M', 'A', 'D', 'D', 'M', 'GK', 'D', 'D', 'M', 'M', 'A', 'M', 'D', 'A', 'A', 'D', 'GK', 'GK', 'D', 'A', 'M', 'D', 'D', 'M', 'GK', 'D', 'M', 'M', 'D', 'M', 'GK', 'D', 'A', 'M', 'GK', 'M', 'M', 'M', 'A', 'M', 'M', 'GK', 'M', 'D', 'D', 'D', 'D', 'D', 'M', 'D', 'M', 'D', 'A', 'GK', 'M', 'D', 'D', 'A', 'GK', 'D', 'M', 'A', 'M', 'D', 'M', 'D', 'GK', 'M', 'GK', 'A', 'D', 'D', 'A', 'D', 'A', 'M', 'M', 'M', 'D', 'D', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'M', 'M', 'GK', 'M', 'GK', 'M', 'M', 'D', 'D', 'GK', 'D', 'M', 'D', 'D', 'A', 'M', 'GK', 'D', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'D', 'A', 'A', 'A', 'A', 'GK', 'D', 'GK', 'A', 'A', 'D', 'M', 'M', 'A', 'A', 'D', 'M', 'M', 'GK', 'M', 'D', 'D', 'M', 'D', 'GK', 'D', 'GK', 'M', 'D', 'D', 'GK', 'D', 'A', 'M', 'D', 'GK', 'D', 'A', 'A', 'A', 'D', 'GK', 'D', 'D', 'GK', 'GK', 'A', 'A', 'M', 'D', 'D', 'D', 'GK', 'A', 'M', 'M', 'M', 'A', 'A', 'M', 'D', 'D', 'D', 'D', 'M', 'D', 'A', 'A', 'D', 'D', 'D', 'D', 'M', 'M', 'M', 'D', 'D', 'M', 'A', 'M', 'D', 'D', 'A', 'GK', 'D', 'D', 'GK', 'M', 'D', 'M', 'A', 'A', 'A', 'A', 'GK', 'A', 'D', 'D', 'M', 'M', 'A', 'A', 'A', 'D', 'M', 'A', 'A', 'A', 'D', 'GK', 'D', 'M', 'D', 'M', 'M', 'M', 'A', 'A', 'A', 'D', 'A', 'A', 'D', 'A', 'A', 'M', 'D', 'M', 'A', 'A', 'M', 'A', 'M', 'M', 'D', 'D', 'M', 'D', 'GK', 'D', 'A', 'D', 'M', 'D', 'A', 'A', 'D', 'M', 'A', 'D', 'M', 'D', 'D', 'M', 'D', 'A', 'D', 'M', 'D', 'M', 'GK', 'A', 'D', 'GK', 'A', 'D', 'A', 'A', 'D', 'M', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'D', 'D', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'M', 'D', 'M', 'GK', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'M', 'GK', 'M', 'M', 'D', 'M', 'D', 'M', 'D', 'D', 'GK', 'A', 'D', 'A', 'A', 'M', 'D', 'M', 'M', 'GK', 'D', 'D', 'GK', 'A', 'GK', 'D', 'A', 'A', 'A', 'D', 'GK', 'A', 'M', 'A', 'A', 'GK', 'M', 'A', 'D', 'GK', 'D', 'M', 'A', 'M', 'A', 'A', 'M', 'D', 'M', 'GK', 'D', 'D', 'M', 'A', 'D', 'M', 'D', 'M', 'M', 'D', 'D', 'A', 'A', 'M', 'D', 'A', 'M', 'D', 'A', 'D', 'D', 'M', 'D', 'M', 'M', 'A', 'M', 'A', 'D', 'M', 'D', 'A', 'D', 'D', 'A', 'A', 'GK', 'D', 'M', 'A', 'A', 'A', 'M', 'D', 'D', 'GK', 'M', 'A', 'D', 'GK', 'M', 'D', 'A', 'M', 'M', 'A', 'D', 'M', 'D', 'A', 'M', 'M', 'D', 'D', 'M', 'M', 'GK', 'D', 'A', 'M', 'A', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'M', 'D', 'A', 'A', 'D', 'D', 'A', 'GK', 'D', 'M', 'GK', 'M', 'GK', 'D', 'D', 'A', 'A', 'D', 'D', 'A', 'M', 'D', 'M', 'M', 'M', 'D', 'M', 'D', 'A', 'M', 'M', 'A', 'A', 'M', 'M', 'D', 'D', 'D', 'D', 'D', 'A', 'M', 'M', 'M', 'D', 'GK', 'GK', 'A', 'D', 'M', 'M', 'M', 'M', 'M', 'A', 'D', 'M', 'D', 'D', 'A', 'D', 'D', 'M', 'D', 'A', 'D', 'D', 'D', 'A', 'M', 'M', 'D', 'A', 'A', 'D', 'A', 'A', 'D', 'D', 'D', 'M', 'M', 'M', 'D', 'A', 'A', 'A', 'M', 'M', 'D', 'GK', 'M', 'A', 'A', 'D', 'D', 'D', 'A', 'A', 'A', 'M', 'D', 'A', 'GK', 'A', 'M', 'A', 'D', 'A', 'D', 'D', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'A', 'M', 'A', 
'A', 'D', 'GK', 'M', 'GK', 'D', 'A', 'M', 'GK', 'D', 'M', 'D', 'M', 'A', 'A', 'D', 'D', 'M', 'D', 'M', 'M', 'D', 'M', 'GK', 'A', 'D', 'D', 'D', 'A', 'D', 'M', 'D', 'D', 'A', 'M', 'D', 'A', 'D', 'M', 'D', 'A', 'GK', 'D', 'D', 'A', 'GK', 'M', 'M', 'GK', 'A', 'D', 'M', 'M', 'A', 'D', 'M', 'A', 'M', 'D', 'M', 'M', 'A', 'M', 'D', 'D', 'M', 'D', 'A', 'D', 'D', 'D', 'D', 'M', 'M', 'D', 'M', 'A', 'M', 'D', 'M', 'D', 'A', 'A', 'GK', 'M', 'D', 'D', 'M', 'D', 'M', 'A', 'A', 'D', 'GK', 'A', 'D', 'D', 'A', 'M', 'M', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'A', 'D', 'D', 'D', 'D', 'D', 'M', 'D', 'D', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'M', 'M', 'M', 'GK', 'D', 'GK', 'GK', 'GK', 'D', 'M', 'A', 'D', 'A', 'D', 'M', 'M', 'A', 'M', 'M', 'D', 'D', 'A', 'GK', 'GK', 'M', 'D', 'M', 'GK', 'M', 'D', 'D', 'D', 'D', 'D', 'GK', 'M', 'D', 'M', 'A', 'D', 'M', 'A', 'M', 'GK', 'M', 'M', 'D', 'D', 'D', 'M', 'M', 'D', 'M', 'GK', 'M', 'D', 'GK', 'A', 'M', 'A', 'D', 'D', 'D', 'A', 'A', 'GK', 'A', 'M', 'M', 'D', 'M', 'M', 'D', 'A', 'GK', 'A', 'D', 'GK', 'M', 'A', 'M', 'GK', 'D', 'GK', 'M', 'A', 'M', 'M', 'A', 'D', 'M', 'D', 'D', 'D', 'D', 'M', 'M', 'M', 'A', 'A', 'M', 'D', 'A', 'M', 'A', 'M', 'M', 'A', 'M', 'A', 'A', 'A', 'M', 'M', 'GK', 'D', 'D', 'D', 'D', 'D', 'M', 'GK', 'A', 'D', 'D', 'D', 'M', 'GK', 'M', 'D', 'GK', 'A', 'D', 'D', 'M', 'M', 'M', 'M', 'M', 'M', 'A', 'D', 'M', 'A', 'A', 'M', 'M', 'A', 'A', 'M', 'A', 'D', 'A', 'D', 'D', 'M', 'M', 'M', 'D', 'D', 'M', 'GK', 'A', 'A', 'M', 'A', 'D', 'A', 'D', 'D', 'A', 'M', 'A', 'A', 'M', 'M', 'D', 'M', 'A', 'A', 'D', 'D', 'D', 'A', 'D', 'M', 'A', 'D', 'D', 'D', 'M', 'M', 'D', 'D', 'D', 'GK', 'M', 'M', 'A', 'A', 'A', 'D', 'M', 'M', 'GK', 'GK', 'D', 'D', 'A', 'D', 'D', 'M', 'D', 'A', 'D', 'A', 'M', 'M', 'D', 'M', 'GK', 'A', 'M', 'D', 'M', 'M', 'M', 'GK', 'D', 'A', 'D', 'A', 'D', 'M', 'D', 'D', 'A', 'A', 'M', 'D', 'M', 'M', 'GK', 'M', 'D', 'M', 'D', 'M', 'GK', 'A', 'M', 'D', 'A', 'D', 'M', 'D', 'M', 'A', 'M', 'M', 'M', 'D', 'GK', 'GK', 'D', 'GK', 'D', 'D', 'A', 'D', 'A', 'M', 'D', 'A', 'M', 'A', 'A', 'GK', 'M', 'A', 'GK', 'M', 'M', 'A', 'M', 'A', 'GK', 'A', 'M', 'A', 'M', 'D', 'A', 'M', 'GK', 'M', 'M', 'A', 'GK', 'A', 'D', 'M', 'M', 'A', 'M', 'D', 'D', 'A', 'D', 'D', 'A', 'GK', 'M', 'M', 'GK', 'M', 'M', 'A', 'A', 'D', 'A', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'A', 'M', 'M', 'A', 'D', 'M', 'M', 'D', 'A', 'D', 'M', 'GK', 'D', 'M', 'A', 'D', 'M', 'D', 'A', 'M', 'D', 'A', 'M', 'D', 'M', 'D', 'A', 'A', 'A', 'M', 'A', 'D', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'A', 'A', 'D', 'D', 'D', 'M', 'D', 'M', 'M', 'A', 'M', 'M', 'M', 'A', 'A', 'M', 'M', 'GK', 'M', 'M', 'M', 'M', 'GK', 'D', 'D', 'M', 'A', 'D', 'GK', 'D', 'A', 'GK', 'D', 'A', 'D', 'M', 'M', 'A', 'M', 'A', 'M', 'D', 'A', 'M', 'D', 'M', 'A', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'D', 'A', 'D', 'M', 'A', 'A', 'M', 'A', 'M', 'A', 'M', 'D', 'A', 'D', 'A', 'M', 'M', 'M', 'M', 'A', 'M', 'D', 'M', 'D', 'A', 'GK', 'D', 'GK', 'M', 'D', 'A', 'D', 'GK', 'GK', 'M', 'M', 'A', 'GK', 'M', 'D', 'M', 'A', 'A', 'D', 'D', 'A', 'D', 'D', 'M', 'M', 'D', 'M', 'A', 'M', 'D', 'GK', 'A', 'M', 'GK', 'A', 'D', 'M', 'A', 'M', 'M', 'D', 'A', 'A', 'D', 'D', 'M', 'M', 'D', 'M', 'M', 'A', 'M', 'A', 'D', 'M', 'A', 'M', 'M', 'D', 'M', 'A', 'A', 'M', 'GK', 'M', 'M', 'D', 'M', 'D', 'D', 'M', 'M', 'D', 'M', 'M', 'M', 'A', 'M', 'A', 'A', 'D', 'M', 'GK', 'A', 'M', 'GK', 'A', 'A', 'A', 'A', 'A', 'A', 'D', 'M', 'D', 'D', 'M', 'GK', 'D', 'A', 'M', 'D', 'M', 'A', 'M', 'D', 'D', 'A', 'A', 'A', 'D', 'M', 'M', 'M', 'M', 'M', 'GK', 'M', 'A', 'A', 'D', 'D', 'D', 'M', 'A', 'M', 'D', 'A', 
'D', 'D', 'A', 'M', 'GK', 'M', 'GK', 'D', 'A', 'GK', 'A', 'A', 'D', 'M', 'D', 'A', 'GK', 'M', 'M', 'D', 'D', 'D', 'GK', 'GK', 'A', 'D', 'D', 'M', 'A', 'D', 'D', 'D', 'M', 'D', 'GK', 'M', 'M', 'M', 'M', 'GK', 'D', 'GK', 'M', 'A', 'A', 'A', 'M', 'M', 'M', 'M', 'A', 'A', 'A', 'GK', 'D', 'D', 'D', 'M', 'M', 'D', 'GK', 'D', 'A', 'A', 'M', 'M', 'D', 'M', 'M', 'M', 'A', 'D', 'A', 'D', 'D', 'M', 'D', 'M', 'M', 'D', 'A', 'D', 'GK', 'A', 'GK', 'M', 'M', 'D', 'A', 'GK', 'A', 'A', 'M', 'M', 'M', 'GK', 'A', 'M', 'M', 'GK', 'A', 'A', 'D', 'A', 'A', 'M', 'A', 'D', 'A', 'A', 'M', 'A', 'A', 'M', 'M', 'A', 'GK', 'M', 'M', 'D', 'M', 'M', 'M', 'A', 'M', 'D', 'M', 'M', 'A', 'M', 'M', 'D', 'M', 'GK', 'GK', 'M', 'M', 'A', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'A', 'M', 'D', 'M', 'A', 'A', 'A', 'D', 'GK', 'M', 'M', 'M', 'A', 'GK', 'D', 'M', 'A', 'D', 'M', 'M', 'A', 'GK', 'A', 'D', 'A', 'M', 'D', 'A', 'M', 'A', 'D', 'A', 'D', 'A', 'D', 'A', 'D', 'A', 'D', 'M', 'A', 'M', 'M', 'A', 'D', 'M', 'D', 'M', 'D', 'GK', 'A', 'M', 'D', 'A', 'A', 'GK', 'A', 'A', 'A', 'D', 'M', 'D', 'A', 'D', 'A', 'M', 'D', 'M', 'M', 'D', 'M', 'A', 'M', 'D', 'A', 'D', 'A', 'M', 'M', 'M', 'A', 'A', 'M', 'A', 'M', 'D', 'A', 'A', 'M', 'M', 'D', 'D', 'D', 'M', 'A', 'A', 'M', 'D', 'D', 'A', 'D', 'D', 'A', 'D', 'D', 'D', 'A', 'D', 'M', 'D', 'GK', 'GK', 'D', 'M', 'D', 'D', 'GK', 'D', 'D', 'GK', 'D', 'M', 'D', 'M', 'M', 'A', 'GK', 'A', 'M', 'A', 'M', 'A', 'A', 'M', 'D', 'D', 'A', 'D', 'M', 'A', 'M', 'M', 'M', 'M', 'D', 'M', 'A', 'A', 'D', 'D', 'GK', 'D', 'M', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'A', 'M', 'M', 'D', 'A', 'A', 'A', 'M', 'D', 'M', 'M', 'M', 'D', 'A', 'D', 'M', 'M', 'D', 'GK', 'D', 'D', 'M', 'D', 'M', 'M', 'D', 'A', 'M', 'D', 'M', 'D', 'D', 'A', 'A', 'GK', 'M', 'A', 'A', 'D', 'A', 'M', 'D', 'GK', 'A', 'M', 'M', 'D', 'D', 'A', 'A', 'D', 'D', 'A', 'D', 'D', 'D', 'D', 'A', 'M', 'M', 'M', 'D', 'GK', 'M', 'A', 'M', 'GK', 'M', 'GK', 'D', 'A', 'D', 'A', 'M', 'A', 'D', 'D', 'M', 'M', 'D', 'GK', 'M', 'M', 'D', 'D', 'D', 'D', 'M', 'M', 'D', 'GK', 'D', 'A', 'M', 'GK', 'D', 'D', 'GK', 'A', 'M', 'A', 'D', 'D', 'D', 'M', 'GK', 'M', 'D', 'A', 'D', 'M', 'A', 'A', 'M', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'A', 'M', 'D', 'A', 'M', 'D', 'D', 'GK', 'D', 'D', 'A', 'D', 'A', 'GK', 'D', 'A', 'A', 'M', 'A', 'M', 'M', 'M', 'D', 'A', 'M', 'M', 'A', 'M', 'M', 'M', 'D', 'M', 'M', 'M', 'A', 'A', 'GK', 'A', 'A', 'D', 'A', 'GK', 'D', 'A', 'D', 'M', 'D', 'GK', 'A', 'M', 'D', 'A', 'D', 'M', 'M', 'M', 'M', 'A', 'M', 'D', 'GK', 'M', 'M', 'D', 'A', 'M', 'A', 'A', 'D', 'A', 'D', 'D', 'D', 'M', 'D', 'A', 'A', 'GK', 'GK', 'D', 'D', 'M', 'D', 'M', 'D', 'M', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'A', 'D', 'A', 'M', 'M', 'GK', 'A', 'A', 'M', 'D', 'GK', 'D', 'D', 'A', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'A', 'D', 'D', 'A', 'D', 'M', 'A', 'A', 'A', 'M', 'A', 'M', 'M', 'GK', 'GK', 'D', 'D', 'M', 'A', 'D', 'D', 'GK', 'M', 'GK', 'D', 'D', 'A', 'M', 'M', 'D', 'M', 'M', 'M', 'M', 'D', 'A', 'M', 'M', 'GK', 'M', 'D', 'A', 'D', 'D', 'D', 'D', 'D', 'A', 'M', 'M', 'D', 'A', 'GK', 'D', 'M', 'D', 'D', 'D', 'D', 'M', 'M', 'D', 'GK', 'M', 'D', 'M', 'M', 'A', 'A', 'M', 'M', 'GK', 'A', 'D', 'M', 'M', 'M', 'A', 'M', 'A', 'A', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'D', 'M', 'GK', 'D', 'D', 'GK', 'A', 'A', 'D', 'A', 'D', 'M', 'D', 'A', 'D', 'D', 'A', 'A', 'M', 'A', 'M', 'M', 'M', 'A', 'A', 'M', 'A', 'A', 'D', 'M', 'M', 'D', 'A', 'A', 'M', 'M', 'M', 'D', 'A', 'M', 'D', 'A', 'D', 'D', 'A', 'D', 'A', 'D', 'A', 'M', 'D', 'D', 'D', 'GK', 'M', 'M', 'M', 'A', 'M', 'M', 'D', 'M', 
'D', 'D', 'A', 'D', 'M', 'M', 'M', 'M', 'M', 'GK', 'GK', 'A', 'A', 'GK', 'GK', 'D', 'D', 'D', 'A', 'A', 'M', 'GK', 'A', 'M', 'A', 'M', 'A', 'M', 'A', 'A', 'M', 'A', 'D', 'M', 'M', 'M', 'M', 'A', 'GK', 'D', 'M', 'M', 'D', 'A', 'A', 'A', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'A', 'GK', 'M', 'M', 'M', 'D', 'A', 'D', 'GK', 'M', 'D', 'A', 'D', 'D', 'A', 'A', 'D', 'M', 'M', 'D', 'D', 'M', 'D', 'A', 'M', 'M', 'A', 'M', 'D', 'D', 'M', 'D', 'D', 'M', 'A', 'GK', 'A', 'A', 'D', 'M', 'A', 'A', 'A', 'D', 'GK', 'M', 'A', 'A', 'M', 'A', 'GK', 'D', 'A', 'M', 'M', 'A', 'D', 'A', 'D', 'A', 'A', 'M', 'M', 'A', 'A', 'M', 'D', 'D', 'D', 'D', 'GK', 'A', 'GK', 'A', 'D', 'D', 'D', 'A', 'D', 'A', 'M', 'M', 'M', 'M', 'M', 'A', 'M', 'D', 'D', 'D', 'A', 'D', 'M', 'GK', 'M', 'D', 'D', 'M', 'D', 'GK', 'M', 'A', 'M', 'M', 'D', 'D', 'M', 'A', 'D', 'A', 'M', 'GK', 'M', 'D', 'A', 'D', 'A', 'D', 'M', 'GK', 'D', 'M', 'A', 'A', 'A', 'A', 'D', 'A', 'D', 'D', 'D', 'D', 'GK', 'D', 'GK', 'D', 'D', 'A', 'A', 'A', 'GK', 'D', 'M', 'GK', 'M', 'M', 'GK', 'D', 'A', 'A', 'D', 'M', 'M', 'M', 'A', 'D', 'M', 'D', 'M', 'A', 'D', 'M', 'D', 'A', 'GK', 'D', 'M', 'D', 'GK', 'D', 'M', 'GK', 'M', 'D', 'A', 'A', 'D', 'A', 'D', 'A', 'D', 'A', 'D', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'D', 'D', 'M', 'A', 'A', 'M', 'A', 'M', 'M', 'A', 'GK', 'GK', 'M', 'GK', 'D', 'D', 'A', 'M', 'D', 'GK', 'D', 'GK', 'D', 'A', 'M', 'A', 'GK', 'D', 'GK', 'A', 'M', 'M', 'M', 'D', 'M', 'M', 'M', 'GK', 'D', 'D', 'M', 'M', 'D', 'D', 'A', 'M', 'M', 'M', 'A', 'GK', 'D', 'A', 'M', 'M', 'GK', 'A', 'A', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'M', 'GK', 'M', 'M', 'D', 'M', 'M', 'D', 'D', 'GK', 'M', 'D', 'GK', 'D', 'M', 'M', 'A', 'D', 'M', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'A', 'D', 'A', 'GK', 'M', 'A', 'M', 'D', 'M', 'M', 'D', 'M', 'M', 'M', 'M', 'D', 'M', 'D', 'M', 'A', 'GK', 'A', 'A', 'D', 'D', 'D', 'A', 'M', 'D', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'D', 'GK', 'GK', 'D', 'M', 'D', 'A', 'D', 'D', 'M', 'D', 'M', 'M', 'A', 'A', 'D', 'D', 'D', 'M', 'D', 'D', 'A', 'D', 'M', 'A', 'A', 'D', 'D', 'A', 'A', 'D', 'GK', 'M', 'M', 'A', 'A', 'M', 'A', 'A', 'M', 'GK', 'M', 'D', 'A', 'A', 'A', 'M', 'M', 'M', 'D', 'M', 'A', 'M', 'A', 'A', 'M', 'D', 'A', 'GK', 'M', 'D', 'A', 'D', 'D', 'M', 'M', 'A', 'D', 'M', 'D', 'A', 'M', 'M', 'M', 'A', 'M', 'M', 'M', 'M', 'A', 'A', 'M', 'A', 'A', 'D', 'D', 'GK', 'A', 'M', 'A', 'D', 'A', 'M', 'GK', 'D', 'M', 'M', 'D', 'M', 'D', 'D', 'A', 'M', 'A', 'GK', 'A', 'A', 'D', 'M', 'D', 'A', 'D', 'M', 'D', 'M', 'A', 'GK', 'M', 'M', 'M', 'M', 'A', 'M', 'M', 'D', 'M', 'M', 'D', 'M', 'M', 'A', 'M', 'M', 'M', 'M', 'D', 'M', 'M', 'D', 'A', 'A', 'D', 'D', 'M', 'M', 'M', 'M', 'GK', 'GK', 'A', 'M', 'M', 'A', 'M', 'M', 'M', 'D', 'M', 'D', 'M', 'M', 'A', 'M', 'GK', 'A', 'A', 'A', 'GK', 'M', 'A', 'M', 'D', 'M', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'GK', 'D', 'M', 'M', 'D', 'GK', 'GK', 'D', 'M', 'A', 'A', 'M', 'A', 'M', 'M', 'M', 'A', 'GK', 'M', 'M', 'A', 'D', 'M', 'M', 'M', 'D', 'M', 'A', 'M', 'D', 'M', 'A', 'A', 'A', 'D', 'D', 'D', 'D', 'A', 'D', 'M', 'D', 'M', 'D', 'A', 'M', 'D', 'A', 'D', 'A', 'GK', 'A', 'D', 'M', 'A', 'M', 'D', 'M', 'D', 'M', 'M', 'D', 'A', 'M', 'A', 'D', 'M', 'D', 'D', 'A', 'GK', 'A', 'A', 'M', 'M', 'M', 'M', 'A', 'M', 'A', 'A', 'D', 'M', 'GK', 'M', 'D', 'A', 'M', 'A', 'M', 'GK', 'A', 'M', 'D', 'M', 'D', 'A', 'A', 'D', 'M', 'A', 'M', 'M', 'A', 'M', 'A', 'M', 'M', 'A', 'D', 'D', 'D', 'GK', 'D', 'A', 'D', 'D', 'M', 'D', 'A', 'D', 'D', 'D', 'M', 'M', 'A', 'D', 'M', 'D', 'A', 'D', 'M', 'M', 'D', 'D', 
'M', 'M', 'D', 'GK', 'D', 'D', 'D', 'M', 'D', 'A', 'D', 'A', 'D', 'M', 'M', 'GK', 'A', 'A', 'M', 'D', 'GK', 'D', 'M', 'D', 'M', 'A', 'GK', 'GK', 'M', 'M', 'A', 'M', 'M', 'A', 'M', 'GK', 'D', 'D', 'M', 'M', 'D', 'M', 'A', 'M', 'GK', 'D', 'D', 'D', 'A', 'A', 'GK', 'D', 'GK', 'D', 'D', 'GK', 'D', 'A', 'A', 'M', 'D', 'A', 'D', 'D', 'M', 'D', 'A', 'A', 'M', 'M', 'A', 'D', 'M', 'M', 'A', 'D', 'M', 'M', 'A', 'A', 'M', 'M', 'A', 'D', 'M', 'M', 'D', 'M', 'D', 'GK', 'A', 'M', 'A', 'A', 'D', 'A', 'M', 'M', 'M', 'D', 'D', 'D', 'M', 'D', 'A', 'M', 'GK', 'M', 'A', 'GK', 'M', 'M', 'M', 'A', 'M', 'GK', 'D', 'A', 'D', 'D', 'D', 'D', 'D', 'M', 'M', 'M', 'GK', 'A', 'D', 'A', 'M', 'A', 'A', 'M', 'A', 'D', 'M', 'M', 'A', 'M', 'A', 'D', 'M', 'D', 'A', 'D', 'M', 'M', 'M', 'A', 'D', 'A', 'D', 'A', 'M', 'M', 'M', 'A', 'M', 'M', 'A', 'D', 'M', 'D', 'D', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'A', 'M', 'GK', 'D', 'M', 'A', 'GK', 'M', 'A', 'A', 'D', 'M', 'A', 'D', 'A', 'GK', 'D', 'D', 'A', 'M', 'A', 'D', 'D', 'D', 'A', 'M', 'D', 'GK', 'A', 'A', 'M', 'D', 'D', 'GK', 'D', 'M', 'M', 'D', 'M', 'A', 'A', 'M', 'M', 'A', 'D', 'M', 'M', 'M', 'D', 'M', 'D', 'M', 'M', 'A', 'A', 'D', 'A', 'A', 'A', 'M', 'M', 'A', 'M', 'M', 'GK', 'M', 'D', 'D', 'M', 'A', 'M', 'M', 'M', 'A', 'A', 'A', 'A', 'D', 'D', 'A', 'D', 'D', 'M', 'D', 'GK', 'GK', 'M', 'D', 'D', 'A', 'D', 'M', 'GK', 'A', 'M', 'M', 'A', 'M', 'A', 'A', 'D', 'A', 'D', 'A', 'D', 'D', 'D', 'D', 'D', 'M', 'GK', 'M', 'A', 'M', 'M', 'M', 'D', 'M', 'M', 'A', 'M', 'D', 'D', 'D', 'M', 'D', 'M', 'M', 'D', 'GK', 'GK', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'D', 'GK', 'A', 'GK', 'D', 'M', 'A', 'M', 'M', 'M', 'A', 'A', 'A', 'M', 'M', 'A', 'D', 'A', 'A', 'D', 'M', 'D', 'D', 'M', 'A', 'D', 'A', 'D', 'M', 'A', 'A', 'M', 'A', 'A', 'M', 'D', 'M', 'A', 'M', 'M', 'M', 'A', 'D', 'A', 'A', 'A', 'D', 'D', 'M', 'M', 'M', 'D', 'D', 'M', 'A', 'M', 'M', 'A', 'M', 'D', 'D', 'A', 'M', 'A', 'M', 'D', 'D', 'M', 'M', 'GK', 'D', 'M', 'D', 'M', 'D', 'D', 'D', 'A', 'A', 'M', 'M', 'M', 'A', 'D', 'A', 'D', 'M', 'D', 'M', 'A', 'A', 'D', 'M', 'A', 'A', 'D', 'A', 'GK', 'D', 'M', 'M', 'M', 'A', 'M', 'A', 'A', 'GK', 'M', 'D', 'A', 'A', 'A', 'GK', 'GK', 'M', 'M', 'D', 'D', 'M', 'M', 'D', 'GK', 'M', 'GK', 'M', 'M', 'A', 'M', 'A', 'M', 'D', 'M', 'M', 'D', 'A', 'GK', 'D', 'GK', 'M', 'D', 'A', 'M', 'D', 'D', 'D', 'A', 'D', 'M', 'M', 'M', 'A', 'M', 'A', 'M', 'D', 'M', 'A', 'A', 'D', 'M', 'M', 'M', 'M', 'A', 'D', 'M', 'A', 'D', 'D', 'A', 'D', 'M', 'M', 'D', 'M', 'A', 'M', 'M', 'A', 'A', 'M', 'A', 'GK', 'M', 'GK', 'A', 'A', 'A', 'M', 'A', 'GK', 'M', 'A', 'D', 'M', 'D', 'M', 'D', 'A', 'A', 'D', 'M', 'M', 'D', 'D', 'D', 'D', 'M', 'M', 'A', 'D', 'GK', 'M', 'M', 'A', 'GK', 'D', 'A', 'M', 'D', 'D', 'GK', 'A', 'D', 'D', 'M', 'M', 'A', 'D', 'GK', 'D', 'M', 'M', 'M', 'A', 'M', 'M', 'A', 'D', 'GK', 'GK', 'D', 'D', 'M', 'D', 'M', 'D', 'M', 'GK', 'A', 'A', 'D', 'A', 'M', 'D', 'D', 'D', 'GK', 'D', 'A', 'A', 'D', 'D', 'D', 'M', 'A', 'D', 'M', 'A', 'M', 'GK', 'A', 'M', 'A', 'D', 'D', 'M', 'D', 'D', 'A', 'M', 'M', 'D', 'D', 'D', 'D', 'M', 'M', 'A', 'A', 'D', 'D', 'M', 'GK', 'A', 'A', 'A', 'M', 'D', 'M', 'D', 'M', 'M', 'M', 'D', 'D', 'A', 'D', 'M', 'M', 'D', 'M', 'M', 'A', 'A', 'A', 'GK', 'M', 'D', 'D', 'M', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'A', 'M', 'D', 'M', 'A', 'A', 'GK', 'D', 'M', 'M', 'A', 'A', 'A', 'GK', 'A', 'M', 'M', 'M', 'A', 'D', 'M', 'D', 'A', 'A', 'A', 'M', 'A', 'A', 'M', 'GK', 'A', 'A', 'A', 'M', 'D', 'M', 'D', 'D', 'D', 'A', 'M', 'A', 'GK', 'M', 'D', 'D', 'D', 'M', 'A', 'GK', 'M', 'D', 'M', 'M', 'M', 'M', 'GK', 'D', 'M', 'A', 'D', 
'D', 'D', 'A', 'GK', 'M', 'D', 'M', 'M', 'A', 'A', 'M', 'M', 'D', 'D', 'D', 'GK', 'GK', 'M', 'A', 'D', 'A', 'D', 'D', 'M', 'GK', 'D', 'M', 'A', 'M', 'A', 'M', 'D', 'D', 'M', 'A', 'D', 'M', 'D', 'D', 'M', 'M', 'D', 'M', 'A', 'A', 'D', 'M', 'A', 'M', 'M', 'GK', 'M', 'M', 'M', 'D', 'GK', 'D', 'A', 'M', 'M', 'GK', 'M', 'A', 'A', 'D', 'D', 'D', 'GK', 'M', 'M', 'A', 'M', 'M', 'M', 'D', 'M', 'A', 'M', 'D', 'D', 'A', 'M', 'GK', 'D', 'D', 'M', 'D', 'A', 'GK', 'D', 'A', 'M', 'A', 'D', 'D', 'GK', 'A', 'D', 'M', 'M', 'A', 'M', 'M', 'D', 'GK', 'M', 'M', 'D', 'GK', 'M', 'GK', 'A', 'D', 'M', 'M', 'A', 'A', 'M', 'A', 'M', 'GK', 'D', 'D', 'M', 'M', 'M', 'M', 'A', 'D', 'A', 'GK', 'D', 'D', 'D', 'M', 'GK', 'D', 'GK', 'GK', 'A', 'D', 'GK', 'GK', 'A', 'GK', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'D', 'GK', 'M', 'D', 'D', 'A', 'A', 'D', 'D', 'GK', 'M', 'A', 'M', 'M', 'D', 'M', 'D', 'D', 'A', 'M', 'M', 'M', 'D', 'A', 'D', 'GK', 'M', 'D', 'M', 'A', 'A', 'D', 'A', 'M', 'A', 'D', 'M', 'A', 'M', 'GK', 'D', 'A', 'A', 'M', 'A', 'D', 'A', 'A', 'M', 'D', 'M', 'D', 'D', 'M', 'GK', 'A', 'A', 'D', 'D', 'D', 'M', 'M', 'M', 'A', 'M', 'D', 'A', 'M', 'D', 'M', 'M', 'A', 'D', 'GK', 'M', 'A', 'D', 'M', 'A', 'D', 'M', 'M', 'D', 'M', 'D', 'A', 'M', 'A', 'M', 'GK', 'A', 'M', 'M', 'D', 'M', 'M', 'D', 'D', 'A', 'M', 'M', 'M', 'M', 'A', 'M', 'M', 'D', 'A', 'D', 'A', 'M', 'A', 'D', 'GK', 'A', 'D', 'M', 'GK', 'D', 'D', 'D', 'A', 'D', 'M', 'M', 'A', 'M', 'A', 'M', 'M', 'D', 'A', 'A', 'D', 'D', 'M', 'D', 'GK', 'A', 'D', 'A', 'D', 'M', 'D', 'D', 'D', 'GK', 'D', 'D', 'A', 'GK', 'D', 'D', 'D', 'A', 'A', 'GK', 'D', 'D', 'D', 'A', 'A', 'M', 'M', 'D', 'D', 'D', 'GK', 'A', 'D', 'M', 'D', 'A', 'D', 'M', 'D', 'A', 'D', 'M', 'A', 'A', 'D', 'A', 'M', 'M', 'A', 'D', 'A', 'A', 'M', 'D', 'GK', 'M', 'A', 'M', 'D', 'D', 'D', 'A', 'D', 'M', 'D', 'A', 'M', 'D', 'D', 'D', 'D', 'M', 'A', 'M', 'M', 'GK', 'D', 'M', 'GK', 'A', 'A', 'D', 'M', 'M', 'A', 'D', 'M', 'M', 'M', 'A', 'A', 'M', 'A', 'A', 'D', 'A', 'A', 'D', 'M', 'M', 'D', 'D', 'M', 'M', 'GK', 'M', 'D', 'D', 'M', 'GK', 'A', 'D', 'D', 'A', 'A', 'D', 'A', 'M', 'GK', 'A', 'D', 'M', 'M', 'M', 'GK', 'A', 'M', 'A', 'A', 'A', 'A', 'M', 'A', 'A', 'M', 'D', 'M', 'M', 'A', 'D', 'D', 'A', 'A', 'M', 'M', 'M', 'M', 'D', 'D', 'A', 'M', 'D', 'A', 'D', 'D', 'D', 'A', 'M', 'M', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'M', 'A', 'A', 'A', 'D', 'GK', 'M', 'M', 'M', 'A', 'A', 'M', 'M', 'M', 'A', 'M', 'M', 'GK', 'M', 'D', 'A', 'M', 'D', 'GK', 'M', 'GK', 'M', 'M', 'A', 'M', 'D', 'A', 'M', 'M', 'A', 'M', 'M', 'A', 'M', 'M', 'A', 'D', 'M', 'M', 'D', 'D', 'M', 'D', 'A', 'D', 'A', 'A', 'D', 'A', 'M', 'A', 'M', 'GK', 'A', 'M', 'M', 'A', 'D', 'D', 'A', 'A', 'A', 'A', 'A', 'D', 'M', 'D', 'M', 'M', 'GK', 'D', 'D', 'A', 'GK', 'M', 'D', 'D', 'M', 'D', 'D', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'D', 'M', 'M', 'A', 'M', 'D', 'D', 'D', 'D', 'M', 'A', 'D', 'D', 'D', 'A', 'D', 'A', 'A', 'D', 'D', 'D', 'A', 'D', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'D', 'A', 'D', 'A', 'D', 'A', 'M', 'GK', 'D', 'GK', 'M', 'D', 'M', 'A', 'D', 'A', 'D', 'A', 'M', 'D', 'GK', 'A', 'A', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'A', 'D', 'M', 'D', 'D', 'D', 'A', 'A', 'A', 'M', 'M', 'D', 'GK', 'A', 'A', 'M', 'A', 'A', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'M', 'A', 'A', 'D', 'A', 'A', 'M', 'M', 'M', 'M', 'M', 'D', 'D', 'A', 'A', 'M', 'D', 'D', 'M', 'A', 'M', 'D', 'M', 'M', 'D', 'D', 'M', 'D', 'M', 'M', 'A', 'A', 'D', 'A', 'A', 'D', 'A', 'A', 'M', 'D', 'M', 'GK', 'A', 'D', 'D', 'GK', 'GK', 'D', 'D', 'M', 'M', 'A', 'A', 'A', 'D', 'M', 'D', 'M', 'D', 'D', 'A', 'A', 'A', 'A', 'D', 
'A', 'A', 'M', 'M', 'D', 'A', 'M', 'M', 'M', 'A', 'M', 'M', 'D', 'GK', 'A', 'M', 'GK', 'D', 'D', 'M', 'A', 'GK', 'M', 'M', 'M', 'D', 'M', 'M', 'M', 'M', 'A', 'M', 'GK', 'M', 'A', 'M', 'M', 'M', 'M', 'A', 'A', 'A', 'M', 'D', 'M', 'D', 'D', 'M', 'M', 'A', 'D', 'M', 'D', 'M', 'A', 'M', 'A', 'D', 'A', 'GK', 'M', 'M', 'D', 'M', 'A', 'M', 'M', 'M', 'D', 'GK', 'GK', 'D', 'M', 'D', 'A', 'M', 'A', 'GK', 'D', 'D', 'M', 'GK', 'D', 'D', 'A', 'M', 'D', 'A', 'M', 'M', 'M', 'D', 'M', 'D', 'A', 'M', 'A', 'A', 'M', 'M', 'A', 'M', 'M', 'A', 'GK', 'D', 'GK', 'D', 'A', 'D', 'M', 'GK', 'D', 'M', 'M', 'GK', 'M', 'M', 'A', 'A', 'M', 'GK', 'D', 'GK', 'A', 'D', 'M', 'A', 'D', 'A', 'A', 'A', 'A', 'M', 'A', 'D', 'A', 'A', 'GK', 'M', 'M', 'D', 'D', 'D', 'A', 'GK', 'GK', 'D', 'M', 'D', 'GK', 'M', 'GK', 'M', 'D', 'A', 'M', 'D', 'M', 'M', 'A', 'D', 'A', 'A', 'M', 'D', 'A', 'GK', 'A', 'A', 'M', 'GK', 'M', 'M', 'A', 'D', 'M', 'M', 'GK', 'D', 'M', 'M', 'M', 'M', 'A', 'D', 'GK', 'A', 'M', 'D', 'M', 'A', 'M', 'D', 'D', 'M', 'M', 'A', 'GK', 'GK', 'A', 'D', 'M', 'M', 'M', 'M', 'D', 'D', 'D', 'M', 'M', 'D', 'D', 'D', 'GK', 'D', 'D', 'M', 'D', 'D', 'D', 'M', 'M', 'A', 'A', 'M', 'A', 'GK', 'D', 'D', 'M', 'M', 'A', 'D', 'GK', 'A', 'M', 'D', 'A', 'D', 'GK', 'GK', 'M', 'D', 'A', 'M', 'D', 'A', 'A', 'M', 'M', 'D', 'D', 'D', 'A', 'GK', 'A', 'A', 'M', 'M', 'M', 'M', 'D', 'A', 'M', 'A', 'A', 'D', 'D', 'D', 'A', 'M', 'D', 'D', 'D', 'D', 'D', 'A', 'A', 'A', 'M', 'D', 'A', 'A', 'M', 'M', 'D', 'D', 'A', 'M', 'M', 'A', 'A', 'M', 'D', 'D', 'A', 'A', 'GK', 'A', 'A', 'M', 'A', 'D', 'GK', 'D', 'M', 'A', 'M', 'A', 'M', 'D', 'M', 'D', 'D', 'GK', 'M', 'D', 'A', 'M', 'D', 'D', 'M', 'A', 'D', 'M', 'M', 'D', 'D', 'D', 'A', 'D', 'D', 'M', 'M', 'M', 'A', 'GK', 'GK', 'M', 'D', 'M', 'A', 'D', 'A', 'GK', 'M', 'A', 'A', 'A', 'GK', 'M', 'M', 'M', 'M', 'M', 'D', 'M', 'GK', 'A', 'A', 'M', 'A', 'A', 'A', 'M', 'D', 'M', 'D', 'A', 'M', 'M', 'GK', 'M', 'D', 'GK', 'D', 'D', 'M', 'D', 'A', 'M', 'A', 'M', 'D', 'D', 'GK', 'D', 'D', 'M', 'D', 'M', 'A', 'M', 'D', 'GK', 'A', 'M', 'GK', 'A', 'A', 'A', 'M', 'M', 'GK', 'M', 'D', 'M', 'D', 'GK', 'GK', 'D', 'D', 'M', 'A', 'M', 'D', 'A', 'A', 'D', 'A', 'M', 'D', 'M', 'A', 'A', 'GK', 'M', 'D', 'M', 'A', 'M', 'A', 'A', 'A', 'D', 'D', 'M', 'M', 'M', 'A', 'A', 'A', 'GK', 'M', 'A', 'D', 'D', 'M', 'D', 'M', 'A', 'M', 'M', 'A', 'A', 'D', 'D', 'D', 'A', 'D', 'A', 'D', 'D', 'A', 'GK', 'D', 'M', 'M', 'GK', 'M', 'M', 'D', 'A', 'A', 'M', 'D', 'M', 'D', 'D', 'D', 'A', 'GK', 'D', 'GK', 'M', 'D', 'M', 'A', 'A', 'D', 'M', 'D', 'D', 'A', 'A', 'M', 'M', 'D', 'D', 'D', 'D', 'M', 'M', 'D', 'A', 'M', 'D', 'D', 'M', 'A', 'M', 'M', 'D', 'A', 'M', 'D', 'M', 'D', 'M', 'D', 'D', 'D', 'M', 'A', 'M', 'A', 'A', 'GK', 'A', 'D', 'M', 'A', 'D', 'GK', 'D', 'A', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'A', 'D', 'A', 'D', 'M', 'M', 'A', 'D', 'D', 'D', 'D', 'D', 'A', 'A', 'A', 'A', 'A', 'M', 'GK', 'M', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'M', 'D', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'A', 'M', 'M', 'M', 'D', 'A', 'M', 'GK', 'A', 'D', 'A', 'M', 'M', 'D', 'M', 'A', 'M', 'A', 'A', 'M', 'D', 'A', 'A', 'A', 'M', 'A', 'M', 'D', 'M', 'D', 'A', 'D', 'M', 'A', 'D', 'GK', 'D', 'D', 'A', 'A', 'A', 'M', 'M', 'A', 'M', 'M', 'M', 'A', 'D', 'D', 'A', 'A', 'GK', 'A', 'D', 'GK', 'M', 'M', 'D', 'M', 'GK', 'D', 'GK', 'D', 'M', 'A', 'M', 'A', 'M', 'A', 'D', 'D', 'D', 'D', 'D', 'GK', 'A', 'A', 'A', 'D', 'D', 'M', 'M', 'D', 'M', 'GK', 'M', 'A', 'GK', 'M', 'M', 'D', 'A', 'A', 'M', 'GK', 'D', 'A', 'A', 'M', 'A', 'D', 'GK', 'M', 'GK', 'M', 'A', 'D', 'A', 'D', 'A', 'A', 'M', 'A', 'A', 'GK', 
'M', 'D', 'D', 'M', 'A', 'A', 'GK', 'D', 'M', 'M', 'M', 'A', 'A', 'A', 'D', 'GK', 'A', 'D', 'M', 'A', 'GK', 'A', 'A', 'GK', 'D', 'M', 'A', 'A', 'D', 'M', 'D', 'D', 'A', 'M', 'M', 'GK', 'D', 'D', 'A', 'A', 'M', 'D', 'A', 'A', 'M', 'D', 'M', 'A', 'D', 'D', 'D', 'M', 'M', 'D', 'D', 'M', 'M', 'A', 'D', 'A', 'M', 'D', 'D', 'M', 'M', 'A', 'A', 'D', 'M', 'M', 'D', 'A', 'A', 'M', 'D', 'D', 'D', 'M', 'GK', 'D', 'A', 'M', 'D', 'A', 'M', 'M', 'A', 'M', 'A', 'GK', 'A', 'D', 'D', 'M', 'A', 'A', 'A', 'D', 'D', 'A', 'D', 'D', 'M', 'GK', 'D', 'D', 'M', 'M', 'M', 'M', 'A', 'D', 'M', 'D', 'D', 'M', 'A', 'D', 'M', 'D', 'A', 'A', 'A', 'A', 'M', 'GK', 'M', 'A', 'A', 'D', 'D', 'M', 'M', 'M', 'A', 'D', 'A', 'GK', 'D', 'D', 'A', 'M', 'M', 'D', 'A', 'GK', 'D', 'M', 'M', 'M', 'A', 'M', 'D', 'D', 'M', 'D', 'GK', 'GK', 'D', 'D', 'M', 'D', 'M', 'M', 'M', 'M', 'M', 'D', 'M', 'M', 'D', 'M', 'M', 'GK', 'D', 'GK', 'A', 'M', 'M', 'D', 'D', 'M', 'M', 'D', 'M', 'D', 'D', 'M', 'D', 'D', 'A', 'GK', 'A', 'M', 'A', 'D', 'D', 'A', 'M', 'GK', 'D', 'A', 'D', 'A', 'M', 'D', 'A', 'M', 'GK', 'M', 'A', 'A', 'GK', 'M', 'D', 'M', 'A', 'M', 'D', 'M', 'M', 'GK', 'D', 'M', 'A', 'A', 'A', 'M', 'A', 'D', 'M', 'M', 'A', 'M', 'A', 'M', 'GK', 'M', 'A', 'D', 'M', 'A', 'M', 'D', 'GK', 'A', 'A', 'D', 'D', 'D', 'M', 'D', 'A', 'A', 'A', 'M', 'M', 'D', 'A', 'A', 'A', 'M', 'A', 'M', 'A', 'D', 'GK', 'GK', 'M', 'D', 'GK', 'A', 'A', 'M', 'M', 'D', 'D', 'M', 'M', 'GK', 'D', 'D', 'GK', 'M', 'M', 'M', 'A', 'GK', 'A', 'M', 'A', 'M', 'D', 'M', 'M', 'GK', 'M', 'A', 'GK', 'D', 'GK', 'GK', 'A', 'D', 'A', 'GK', 'M', 'M', 'A', 'GK', 'A', 'M', 'M', 'A', 'M', 'D', 'D', 'M', 'M', 'A', 'M', 'M', 'M', 'A', 'A', 'M', 'A', 'D', 'D', 'GK', 'A', 'D', 'D', 'D', 'D', 'M', 'D', 'M', 'A', 'GK', 'A', 'D', 'A', 'A', 'A', 'A', 'D', 'GK', 'D', 'M', 'D', 'M', 'A', 'D', 'A', 'M', 'D', 'GK', 'A', 'M', 'D', 'D', 'GK', 'D', 'A', 'M', 'M', 'D', 'M', 'A', 'M', 'M', 'M', 'M', 'M', 'D', 'M', 'A', 'D', 'M', 'D', 'A', 'GK', 'D', 'A', 'M', 'D', 'D', 'GK', 'A', 'M', 'M', 'D', 'D', 'M', 'D', 'D', 'D', 'GK', 'A', 'M', 'M', 'D', 'M', 'D', 'M', 'D', 'A', 'A', 'M', 'D', 'GK', 'D', 'D', 'A', 'D', 'GK', 'D', 'GK', 'M', 'M', 'GK', 'A', 'D', 'M', 'D', 'D', 'A', 'GK', 'GK', 'D', 'A', 'A', 'D', 'D', 'M', 'M', 'D', 'M', 'M', 'D', 'M', 'D', 'M', 'M', 'D', 'D', 'D', 'M', 'D', 'D', 'GK', 'GK', 'M', 'GK', 'D', 'A', 'M', 'D', 'GK', 'M', 'GK', 'D', 'M', 'D', 'M', 'D', 'D', 'A', 'D', 'M', 'M', 'M', 'D', 'M', 'M', 'D', 'D', 'A', 'D', 'A', 'A', 'M', 'D', 'A', 'M', 'D', 'D', 'D', 'D', 'M', 'D', 'D', 'A', 'M', 'D', 'M', 'A', 'M', 'D', 'M', 'M', 'M', 'A', 'M', 'D', 'GK', 'M', 'M', 'A', 'D', 'D', 'D', 'D', 'A', 'D', 'D', 'D', 'M', 'D', 'D', 'D', 'A', 'D', 'M', 'A', 'D', 'M', 'M', 'GK', 'M', 'A', 'M', 'M', 'M', 'D', 'M', 'M', 'M', 'D', 'A', 'M', 'D', 'M', 'A', 'D', 'D', 'A', 'D', 'M', 'M', 'M', 'GK', 'M', 'D', 'M', 'A', 'D', 'D', 'M', 'A', 'M', 'A', 'M', 'A', 'D', 'A', 'M', 'M', 'M', 'M', 'D', 'A', 'A', 'M', 'GK', 'A', 'D', 'D', 'M', 'D', 'M', 'D', 'M', 'D', 'M', 'A', 'M', 'D', 'M', 'M', 'D', 'M', 'D', 'A', 'A', 'D', 'A', 'D', 'GK', 'D', 'M', 'D', 'M', 'D', 'D', 'D', 'M', 'M', 'M', 'D', 'A', 'D', 'GK', 'D', 'M', 'D', 'A', 'M', 'GK', 'D', 'M', 'D', 'M', 'D', 'M', 'GK', 'GK', 'D', 'M', 'A', 'D', 'GK', 'M', 'M', 'D', 'M', 'D', 'GK', 'GK', 'D', 'A', 'M', 'GK', 'A', 'M', 'M', 'M', 'M', 'D', 'GK', 'D', 'M', 'D', 'M', 'D', 'A', 'A', 'GK', 'GK', 'A', 'D', 'A', 'GK', 'M', 'A', 'A', 'M', 'GK', 'GK', 'D', 'M', 'D', 'M', 'A', 'D', 'D', 'M', 'GK', 'D', 'A', 'M', 'M', 'M', 'M', 'A', 'D', 'GK', 'GK', 'D', 'M', 'D', 'M', 'A', 'D', 'A', 
'M', 'D', 'M', 'M', 'D', 'A', 'M', 'D', 'M', 'M', 'D', 'A', 'GK', 'M', 'GK', 'D', 'D', 'D', 'GK', 'D', 'M', 'M', 'M', 'D', 'GK', 'M', 'D', 'A', 'M', 'M', 'A', 'GK', 'M', 'D', 'D', 'M', 'M', 'D', 'M', 'M', 'GK', 'A', 'M', 'A', 'A', 'A', 'GK', 'D', 'D', 'D', 'D', 'GK', 'D', 'GK', 'D', 'D', 'GK', 'M', 'D', 'D', 'M', 'D', 'M', 'GK', 'A', 'M', 'M', 'A', 'D', 'D', 'D', 'D', 'GK', 'A', 'D', 'M', 'D', 'D', 'A', 'M', 'M', 'M', 'A', 'GK', 'M', 'D', 'M', 'D', 'M', 'M', 'GK', 'D', 'GK', 'A', 'D', 'GK', 'M', 'M', 'M', 'M', 'M', 'A', 'M', 'D', 'M', 'D', 'D', 'GK', 'M', 'D', 'GK', 'M', 'D', 'D', 'M', 'D', 'GK', 'A', 'A', 'D', 'D', 'M', 'M', 'M', 'M', 'D', 'A', 'M', 'M', 'D', 'A', 'GK', 'D', 'D', 'D', 'M', 'GK', 'D', 'D', 'D', 'M', 'M', 'D', 'M', 'M', 'D', 'D', 'M', 'D', 'M', 'A', 'M', 'M', 'D', 'M', 'D', 'D', 'GK', 'A', 'M', 'GK', 'M', 'M', 'D', 'M', 'GK', 'M', 'D', 'A', 'D', 'M', 'D', 'M', 'A', 'A', 'M', 'M', 'M', 'M', 'D', 'D', 'D', 'A', 'D', 'A', 'D', 'A', 'A', 'D', 'GK', 'A', 'D', 'A', 'D', 'A', 'A', 'D', 'M', 'M', 'D', 'M', 'A', 'M', 'D', 'M', 'M', 'A', 'A', 'D', 'GK', 'A', 'M', 'M', 'D', 'M', 'D', 'D', 'D', 'GK', 'D', 'GK', 'A', 'M', 'M', 'D', 'A', 'D', 'M', 'D', 'D', 'D', 'A', 'D', 'M', 'M', 'M', 'D', 'GK', 'M', 'M', 'M', 'M', 'M', 'D', 'D', 'GK', 'M', 'M', 'A', 'A', 'M', 'A', 'M', 'D', 'M', 'A', 'M', 'D', 'A', 'D', 'D', 'M', 'A', 'M', 'D', 'M', 'M', 'GK', 'GK', 'GK', 'A', 'A', 'M', 'A', 'M', 'A', 'M', 'GK', 'D', 'GK', 'M', 'GK', 'M', 'D', 'D', 'A', 'GK', 'D', 'M', 'D', 'M', 'A', 'A', 'D', 'D', 'A', 'D', 'M', 'A', 'M', 'M', 'D', 'M', 'M', 'M', 'A', 'M', 'GK', 'D', 'GK', 'M', 'D', 'A', 'M', 'D', 'D', 'D', 'A', 'M', 'GK', 'A', 'A', 'D', 'M', 'A', 'GK', 'D', 'D', 'A', 'M', 'D', 'M', 'D', 'D', 'D', 'D', 'D', 'A', 'M', 'M', 'M', 'M', 'GK', 'D', 'A', 'A', 'GK', 'A', 'M', 'D', 'D', 'D', 'GK', 'D', 'M', 'GK', 'M', 'A', 'M', 'GK', 'M', 'A', 'M', 'A', 'D', 'M', 'D', 'A', 'D', 'A', 'GK', 'D', 'A', 'M', 'GK', 'M', 'GK', 'D', 'GK', 'D', 'M', 'M', 'D', 'D', 'M', 'M', 'D', 'A', 'A', 'M', 'D', 'M', 'A', 'D', 'D', 'GK', 'M', 'D', 'A', 'D', 'M', 'GK', 'D', 'GK', 'M', 'D', 'D', 'M', 'D', 'A', 'D', 'M', 'D', 'GK', 'M', 'GK', 'A', 'M', 'GK', 'GK', 'D', 'D', 'A', 'D', 'M', 'GK', 'D', 'A', 'D', 'A', 'A', 'M', 'A', 'A', 'M', 'D', 'M', 'A', 'GK', 'D', 'M', 'D', 'M', 'GK', 'A', 'A', 'GK', 'A', 'A', 'A', 'D', 'D', 'A', 'GK', 'M', 'A', 'M', 'M', 'D', 'D', 'M', 'M', 'M', 'D', 'D', 'M', 'M', 'D', 'M', 'M', 'D', 'M', 'D', 'M', 'GK', 'GK', 'M', 'M', 'M', 'A', 'A', 'M', 'D', 'A', 'M', 'M', 'GK', 'M', 'A', 'A', 'A', 'D', 'D', 'M']
# Question 1:
# a) Create the numpy array np_positions from the list positions
np_positions = np.array(positions)
# Print the elements of np_positions
print(np_positions)
# Check the data type of np_positions
print(type(np_positions))
# b) Create the numpy array np_heights from the list heights
np_heights = np.array(heights)
# Print the elements of np_heights
print(np_heights)
# Check the data type of np_heights
print(type(np_heights))
###Output
['GK' 'M' 'A' ... 'D' 'D' 'M']
<class 'numpy.ndarray'>
[191 184 185 ... 183 179 179]
<class 'numpy.ndarray'>
###Markdown
Click here to see the result! <class 'numpy.ndarray'>['GK' 'M' 'A' ... 'D' 'D' 'M']<class 'numpy.ndarray'>[191 184 185 ... 183 179 179]
###Code
# Question 2: Compute the median height of the goalkeepers (GK).
# np.where(np_positions == 'GK') returns the indexes of the players whose position is GK
print(np.median(np_heights[np.where(np_positions == 'GK')]))
# Question 3: Compute the median height of the other positions (non-goalkeepers).
print(np.median(np_heights[np.where(np_positions != 'GK')]))
# Question 4: Create a structured array players with a self-defined dtype: 'position' as text (U5) and 'height' as float
print(np_heights.shape)
dt = np.dtype({'names': ('position', 'height'), 'formats': ('U5', 'float')})
np_players = np.zeros(np_positions.shape, dtype=dt)
np_players['position'] = positions
np_players['height'] = heights
print(np_players)
# Question 5: Sort the players array by height, and report the positions with the greatest and the smallest height
print(np.sort(np_players, order='height'))
print('Max height: ', np.amax(np_players['height']))
print('Min height: ', np.amin(np_players['height']))
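# Extra (the position part of Question 5): which positions the tallest and shortest players play
print('Tallest player: ', np_players[np.argmax(np_players['height'])])
print('Shortest player: ', np_players[np.argmin(np_players['height'])])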
###Output
[('M', 158.) ('A', 160.) ('M', 160.) ... ('A', 203.) ('GK', 203.)
('GK', 208.)]
Max height: 208.0
Min height: 158.0
|
demo_nnm.ipynb | ###Markdown
MNIST with DNN (Sequential)
###Code
mnist <- LoadMnist()
train <- mnist$train
test <- mnist$test
layerSpec <- Sequential(
Dense(784, 128),
Dropout(128, keepProb=0.8),
Dense(128, 10, Activation.Identity),
Softmax)
layerSpec
modTime <- system.time(
mod <- nnm(train$x, train$y, layerSpec, verbose=1)
)
print(modTime)
# accuracy on test set
cat("accuracy = ", mean(test$y == predict(mod, test$x, type="label")), "\n")
###Output
accuracy = 0.9692
###Markdown
MNIST with Directed acyclic graph (DAG) We demo a simple DAG with residual connections. This DAG has similar performance to the above full-connection graph but with only about half the parameters.
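A rough count of trainable parameters (weights plus biases; Dropout and Softmax add none) supports this: the Sequential model above has 784·128 + 128 + 128·10 + 10 = 101,770 parameters, while the DAG below has 784·64 + 64 + 64·32 + 32 + 32·16 + 16 + 48·10 + 10 = 53,338, i.e. roughly half.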
###Code
layers <- list(
Dense(784, 64),
Dropout(64, keepProb=0.8),
Dense(64, 32),
Dense(32, 16),
Dense(48, 10, Activation.Identity),
Softmax(10))
edges <- c(1, 2,
2, 3,
3, 4,
4, 5,
3, 5,
5, 6)
dag <- DAG(layers, edges)
dag
modTime2 <- system.time(
dagMod <- nnm(train$x, train$y, dag, verbose=1)
)
print(modTime2)
# accuracy on test set
cat("accuracy = ", mean(test$y == predict(dagMod, test$x, type="label")), "\n")
###Output
accuracy = 0.9653
###Markdown
Demo of embedding columns
###Code
n <- 1000
x <- data.frame(x1 = rnorm(n),
x2 = sample(letters, size=n, replace=TRUE),
x3 = sample(letters, size=n, replace=TRUE))
y <- x$x1 + x$x2 %in% c("a", "d") + rnorm(n)
embeddingCols <- c("x2" ,"x3")
embeddingDims <- c(2, 4)
layerSpecs <- list(Dense(1 + sum(embeddingDims), 2), Dense(2, 1, Activation.Identity))
mod2 <- nnm(x, y, layerSpecs, embeddingCols, embeddingDims)
mod2
cor(y, predict(mod2, x))
###Output
_____no_output_____ |
notebooks/ngram_model.ipynb | ###Markdown
Learning a Predictive N-Gram Model This notebook demonstrates how to use a Markov model to predict the next word in legal-domain text. Specifically, we model the language used in [German cases](https://de.wikipedia.org/wiki/Urteil_(Recht)). The focus lies on showing how data from the [Open Legal Data Project](https://openlegaldata.io) can be used for machine learning. _Note_: This demo is neither about building the best predictive model for the legal domain, nor about building a competitive n-gram implementation. We use a simple fixed-order n-gram implementation without escaping, smoothing or exclusion techniques. Installation Install all repo requirements by running ```pipenv --python 3.7``` followed by ```pipenv install```. To install this environment as a Jupyter Notebook kernel run ```pipenv run python -m ipykernel install --name oldp-notebook```. Obtain We obtain the training (and test) data using the [OLDP SDK for Python](https://github.com/openlegaldata/oldp-sdk-python). For a more detailed example of the API client usage refer to the [OLDP Client Demo](https://github.com/openlegaldata/oldp-notebooks/blob/master/notebooks/oldp-client-demo.ipynb) notebook.
###Code
import oldp_client
conf = oldp_client.Configuration()
conf.api_key['api_key'] = '123abc' # Replace this with your API key
api_client = oldp_client.ApiClient(conf)
cases_api = oldp_client.CasesApi(api_client)
cases = cases_api.cases_list(court=2, page_size=10).results # first page for court=Europäischer Gerichtshof
###Output
_____no_output_____
###Markdown
Clean The raw data that we obtain from the API is in HTML format. Before we can tokenize the text we have to strip the HTML tags and some special characters.
###Code
from utils import preprocessing
def clean(content):
content = preprocessing.remove_pattern(content, r'\n|\t', replace_with=' ')
content = preprocessing.remove_pattern(content, r'<[^>]+>')
content = preprocessing.replace_html_special_ents(content)
content = preprocessing.remove_whitespace(content)
return content
text = ''
for case in cases:
text += clean(case.content)
print("Before: ...{}...".format(cases[0].content[0:100]))
print("After: ...{}...".format(text[0:100]))
###Output
Before: ...<h2>Tenor</h2>
<div>
<p>Als funktional zuständig wird die allgemeine Zivilkammer be...
After: ...Tenor Als funktional zuständig wird die allgemeine Zivilkammer bestimmt. Gründe I. Die in München an...
###Markdown
Explore
###Code
import spacy
import collections
import numpy as np
np.random.seed(0)
class Corpus:
def __init__(self, text, test_percentage=0.1):
self.test_percentage = test_percentage
# use spacy NLP to do the tokenization and sentence boundary detection
nlp = spacy.load('de_core_news_sm')
self.doc = nlp(text)
def get_words(self):
for token in self.doc:
yield token.text
def get_sentences(self, test=False):
for sent in self.doc.sents:
# split into training and test sentences, according to the given percentage
if (np.random.random() >= self.test_percentage and not test) or \
(np.random.random() < self.test_percentage and test):
yield sent
def get_ngrams(self, n, test=False):
for sent in self.get_sentences(test=test):
if len(sent) < 10:
continue
for pos in range(len(sent)):
if len(sent)-pos < n:
break
yield (*[sent[pos+i].text for i in range(n)],)
def print_most_common(n):
counter = collections.Counter(corpus.get_ngrams(n))
print('\nThe most common {}-grams:'.format(n))
for k, v in counter.most_common(5):
print('{}: {}'.format(k, v))
corpus = Corpus(text)
print('Number of words in corpus: ', len(list(corpus.get_words())))
print('Number of training sentences in corpus: ', len(list(corpus.get_sentences())))
print('Number of test sentences in corpus: ', len(list(corpus.get_sentences(test=True))))
print('Size of alphabet:', len(set(corpus.get_words())))
print_most_common(1)
print_most_common(3)
print_most_common(5)
###Output
Number of words in corpus: 30282
Number of training sentences in corpus: 1673
Number of test sentences in corpus: 192
Size of alphabet: 5270
The most common 1-grams:
(',',): 1225
('.',): 1008
('der',): 882
('die',): 666
('des',): 407
The most common 3-grams:
(',', 'dass', 'die'): 35
('Abs.', '1', 'Satz'): 32
(',', 'dass', 'der'): 22
('1', 'Satz', '1'): 21
('§', '11', 'Abs.'): 18
The most common 5-grams:
('§', '124', 'Abs.', '2', 'Nr.'): 13
('Abs.', '5', 'Satz', '1', 'VwGO'): 8
('§', '11', 'Abs.', '2a', 'TierSchG'): 8
(',', 'juris', ',', 'Rn', '.'): 7
('vom', '19', '.', 'März', '2018'): 7
###Markdown
Learning a Model
###Code
class NgramModel:
def __init__(self, n=3):
self.n = n
self.ngrams = None
self.alphabet = None
def learn(self, corpus):
self.ngrams = collections.Counter(corpus.get_ngrams(self.n))
self.alphabet = set(corpus.get_words())
def predict(self, context):
if len(context) < self.n - 1:
raise ValueError('The context has to be at least of length {}!'.format(self.n - 1))
if len(context) >= self.n:
context = context[-self.n + 1:]
matches = {}
for word in self.alphabet:
count = self.ngrams[tuple(context) + (word,)]
if count > 0:
matches[word] = count
total_count = sum(matches.values(), 0.0)
return {k: v / total_count for k, v in matches.items()}
def predict_str(self, context_str):
nlp = spacy.load('de_core_news_sm')
context = [token.text for token in nlp(context_str)]
return self.predict(context)
corpus = Corpus(text)
model = NgramModel(n=3)
model.learn(corpus)
model.predict(['der', 'Europäischen'])
###Output
_____no_output_____
###Markdown
Interpret We can use the predictive model to guess the next word in a sentence with legal content. This could be used as an autocompletion feature in a legal text editor. To compare the performance of several fixed-order models, we use cross entropy as a measure. We see that out of the tested values, n=10 has the best test performance. However, presumably because the training dataset is too small, only about 12% of the contexts could be completed (if a context was not seen in the training data the implemented algorithm does not make a prediction). It seems likely that the good performance, especially with higher _n_, is caused by the large number of set phrases (or tokens) in this domain.
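Concretely, the cross entropy reported below is the average negative log-likelihood in bits per predicted token, $$H = -\frac{1}{N}\sum_{i=1}^{N} \log_2 \hat{P}(w_i \mid w_{i-n+1},\dots,w_{i-1}),$$ where the sum runs only over the $N$ n-grams whose continuation was seen after that context during training (hence the reported counts).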
###Code
d = model.predict_str('Am 23. Dezember 2006 nahm der Sicherheitsrat der Vereinten Nationen (im')
pred_next_word = max(d.keys(), key=lambda key: d[key])
pred_next_word
def eval(n):
corpus = Corpus(text)
model = NgramModel(n=n)
model.learn(corpus)
print('\nN={}:'.format(n))
print('Training cross ent: {} (count={})'.format(*cross_ent(model, corpus, n)))
print('Test cross ent: {} (count={})'.format(*cross_ent(model, corpus, n, test=True)))
def cross_ent(model, corpus, n, test=False):
cross_ent = 0.0
count = 0
for ngram in corpus.get_ngrams(n, test=test):
context = ngram[0:n-1]
pred = ngram[n-1]
distr = model.predict(context)
# only count ngrams that occurred in the training data
if pred in distr:
cross_ent -= np.log2(distr[pred])
count += 1
cross_ent /= count
return cross_ent, count
eval(2)
eval(3)
eval(5)
eval(10)
###Output
N=2:
Training cross ent: 3.6120551673497037 (count=23229)
Test cross ent: 3.5750781588445535 (count=2215)
N=3:
Training cross ent: 0.8136898275785924 (count=21265)
Test cross ent: 0.7985905308149392 (count=2364)
N=5:
Training cross ent: 0.07329458669651902 (count=19292)
Test cross ent: 0.08784666850808225 (count=2149)
N=10:
Training cross ent: 0.005888186456617001 (count=14754)
Test cross ent: 0.0020242914979757085 (count=1482)
|
spatial_statistics_demo.ipynb | ###Markdown
Set-up
###Code
%matplotlib inline
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
from scipy.ndimage.filters import convolve
from scipy.ndimage.filters import gaussian_filter
import numpy as np
import time
el = 151 # number of elements per side in the microstructure
H = 2 # number of phases in the microstructure
vf = .4 # volume fraction of phase 0
iA = 0 # phase A of correlation
iB = 0 # phase B of correlation
###Output
_____no_output_____
###Markdown
Generate Microstructure
###Code
base = np.random.random((el, el))
r2a = np.random.randint(2, 20)
r2b = np.random.randint(2, 20)
weights = np.random.random(size=(r2a, r2b))
raw = convolve(base, weights, mode='wrap')
blur = gaussian_filter(raw, sigma=1)
amin = blur.min()
amax = blur.max()
scaled = (blur-amin)/(amax-amin)
micr = scaled > vf
plt.figure(figsize=[5, 4])
ax = plt.imshow(micr, origin='lower',
interpolation='none', cmap='gray')
plt.colorbar(ax)
plt.title('microstructure')
plt.show()
###Output
_____no_output_____
###Markdown
Compute Microstructure-Function
###Code
mf = np.zeros((H, el, el))
for h in range(H):
mf[h, ...] = micr[...] == h
frac = np.sum(mf[0, ...])/np.float32(mf[0, ...].size)
print "volume fraction phase 0: %s" % np.round(frac, 2)
plt.figure(figsize=[10, 4])
plt.subplot(121)
ax = plt.imshow(mf[0, ...], origin='lower',
interpolation='none', cmap='gray')
plt.colorbar(ax)
plt.title('mf[0, :, :]')
plt.subplot(122)
ax = plt.imshow(mf[1, ...], origin='lower',
interpolation='none', cmap='gray')
plt.colorbar(ax)
plt.title('mf[1, :, :]')
plt.show()
###Output
_____no_output_____
###Markdown
Calculate 2-pt Spatial Statistics: Naive Approach
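With periodic wrapping, the double loop below evaluates the two-point correlation $$f^{AB}(\mathbf{r}) = \frac{1}{S}\sum_{\mathbf{s}} m^{A}(\mathbf{s}+\mathbf{r})\, m^{B}(\mathbf{s}), \qquad S = el^2,$$ i.e. the probability that a randomly placed vector $\mathbf{r}$ finds phase $A$ at its head and phase $B$ at its tail.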
###Code
st = time.time()
ff_v1 = np.zeros((el, el), dtype='float32')
S = np.float32(el**2)
set1 = np.float32(mf[iA, ...])
set2 = np.float32(mf[iB, ...])
for ii in range(el):
    for jj in range(el):
tmp = np.roll(set2, shift=ii, axis=0)
tmp = np.roll(tmp, shift=jj, axis=1)
ff_v1[ii, jj] = np.sum(set1*tmp)/S
timeT = np.round(time.time()-st, 5)
print "correlation computed: %s s" % timeT
###Output
_____no_output_____
###Markdown
Calculate 2-pt Spatial Statistics: FFT Approach
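By the cross-correlation theorem, the same statistics can be obtained with FFTs in $O(S\log S)$ operations: $$\mathcal{F}[f^{AB}] = \frac{1}{S}\,\overline{\mathcal{F}[m^{A}]}\;\mathcal{F}[m^{B}],$$ which is what the magnitude/phase manipulation below evaluates (term1 is the complex conjugate of $M_1$, term2 is $M_2$). For the autocorrelation computed here ($A=B$) this coincides with the naive sum; for cross-correlations the two conventions differ by $\mathbf{r}\to-\mathbf{r}$.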
###Code
st = time.time()
M = np.zeros((H, el, el), dtype='complex128')
for h in range(H):
M[h, ...] = np.fft.fftn(mf[h, ...], axes=[0, 1])
S = el**2
M1 = M[iA, ...]
mag1 = np.abs(M1)
ang1 = np.arctan2(M1.imag, M1.real)
exp1 = np.exp(-1j*ang1)
term1 = mag1*exp1
M2 = M[iB, ...]
mag2 = np.abs(M2)
ang2 = np.arctan2(M2.imag, M2.real)
exp2 = np.exp(1j*ang2)
term2 = mag2*exp2
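# term1*term2 = conj(M1)*M2: the Fourier transform of the cross-correlation (cross-correlation theorem)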
FFtmp = term1*term2/S
ff_v2 = np.fft.ifftn(FFtmp, [el, el], [0, 1]).real
timeT = np.round(time.time()-st, 5)
print "correlation computed: %s s" % timeT
###Output
_____no_output_____
###Markdown
Compare Spatial-Statistics
###Code
plt.figure(figsize=[11, 4])
plt.subplot(121)
ff_c = np.fft.fftshift(ff_v1)
ax = plt.imshow(ff_c, origin='lower',
interpolation='none', cmap='gray')
plt.colorbar(ax)
plt.title('Correlation (Naive method): %s and %s' % (iA, iB))
plt.subplot(122)
ff_c = np.fft.fftshift(ff_v2)
ax = plt.imshow(ff_c, origin='lower',
interpolation='none', cmap='gray')
plt.colorbar(ax)
plt.title('Correlation (FFT method): %s and %s' % (iA, iB))
plt.show()
###Output
_____no_output_____ |
modal_mapping/plot_NRJ-n-ModScat_any.ipynb | ###Markdown
plot_NRJ-n-ModScat_any Plot various diagnostics for checking the robustness of the modal decomposition and of the scattering diagnostics (e.g. compare NRJ balance terms, ...). For comparison with the btrop/bclin NRJ decomposition, see the notebook plot_BclinNRJ_evol.ipynb (in NRJ_flux_diags/). What is shown here: * balance terms * barotropic -> baroclinic terms, from modal decomposition and btrop/bclin decomposition, linear theory and simulation
###Code
%matplotlib notebook
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
import matplotlib.colors as colors
from mpl_toolkits.basemap import Basemap
from matplotlib.animation import FuncAnimation
from mpl_toolkits.axes_grid.inset_locator import inset_axes
import numpy as np
import sys, os
from netCDF4 import Dataset, MFDataset
from datetime import datetime
import scipy.signal as sig
from scipy.ndimage import gaussian_filter
import scipy.interpolate as itp
from PIL import Image, ImageDraw
import json
import pandas as pd
KRYPTON = "/data0/project/vortex/lahaye/"
RUCHBA = KRYPTON+"local_ruchba/"
simul = "luckyt"
if simul in ['luckyt']:
season = "_win"
app = ""
else:
season = ""
app = "-b"
app += season
grid_file = KRYPTON+"/lucky_corgrd.nc"
doms_file = "../NRJ_flux_diag/subdomains_lucky.json"
dirpic = 'pictures/scatdiag_process/'
dosavefig = True
# modal stuff
filscat = KRYPTON+'{0}_modemap/{0}_mode_scatdiag{1}.nc'.format(simul,app)
filcsv = "./{0}_diagscat{1}.csv".format(simul,app)
data_Fa14 = KRYPTON+"Tide_Conv/Falahat_etal_2014_ModalConvM2.nc"
# diag NRJ
filNRJ = "../NRJ_flux_diag/{0}_NRJ_diags.pkl".format(simul)
with open(doms_file, "r") as fp:
mydoms = json.load(fp)
# unfold subdomains
doms, nams = [], []
for key,val in mydoms.items():
if key == "ridges":
for ido,dom in enumerate(val):
doms.append(dom)
nams.append(key.rstrip("s")+str(ido+1))
else:
doms.append(val)
nams.append(key)
# load dataframe(s)
#datfra = pd.read_csv(filcsv, header=[0,1], index_col=0)#, converters={'Cmn': eval})
datfra = pd.read_pickle(filcsv.replace("csv","pkl"))
nmod = len(datfra)
datnrj = pd.read_pickle(filNRJ)
# load netCDF file
nc = Dataset(filscat, "r")
print("variables in {}:".format(filscat),nc.variables.keys())
nc.close()
rho0 = 1025
(datfra.KE/datfra.PE).iloc[1:].plot(marker="x")
plt.grid(True)
plt.legend(ncol=2)
# load data time series
nc = Dataset(filscat, "r")
time = nc.variables['time'][:]
ketser = nc.variables['KEtser'][:]
nc.close()
modes = np.arange(ketser.shape[1])
### plot modal NRJ variations
fig, axs = plt.subplots(3, 1, sharex=True)
# max variation
data = (ketser.max(axis=0)-ketser.min(axis=0))/(time[-1]-time[0])*1e3/3600
ax = axs[0]
ax.bar(np.arange(nmod), data)
ax.set_ylabel(r'max $\Delta K_h/\Delta t$ [J/m^2/s]')
ax.text(.95,.95,"max-min scaled by time period", transform=ax.transAxes, va="top", ha="right")
# relative variation
data /= ketser.mean(axis=0)*1e3
ax = axs[1]
ax.bar(np.arange(nmod), data)
ax.set_ylabel(r'rel $\Delta K_h/\Delta t$ [1/s]')
ax.text(.75,.95,"(max-min)/mean scaled by time period", transform=ax.transAxes, va="top", ha="right")
# whole time series tendency
data = (ketser[-1,:]-ketser[0,:])/(time[-1]-time[0])*1e3/3600
ax = axs[2]
ax.bar(np.arange(nmod), data)
ax.set_ylabel(r'tot $\Delta K_h$ [J/m^2/s]')
ax.text(.95,.95,"(end-beg) scaled by time period", transform=ax.transAxes, va="top", ha="right")
for ax in axs:
ax.grid(True)
ax.ticklabel_format(style='sci',scilimits=(-2,3),axis="y")
if dosavefig:
fig.savefig(dirpic+"{0}_modeNRJ_bar{1}.pdf".format(simul,app), magnification="auto", bbox_inches="tight")
# compare with mean flux divergence and Cbc, Cbt
nc = Dataset(filscat, "r")
divf= nc.variables['divf_full'][:].mean(axis=0)
cbtr = nc.variables['Cmn_tser'][0,:,:].mean(axis=-1)
cbcl = np.nanmean(nc.variables['Cbcl'][:], axis=(-1,-2))
nc.close()
fig, bxs = plt.subplots(2, 2, sharex=True, sharey=True)
axs = bxs.ravel()
#plot relative mean divf
data = abs(divf)/(ketser.mean(axis=0)*1e3)
ax = bxs[0,0]
ax.bar(modes[:], data[:], log=True)
ax.text(.5, .95, r'$|\nabla F_n|$', ha="center", va="top", transform=ax.transAxes)
data = (ketser.max(axis=0)-ketser.min(axis=0))/(time[-1]-time[0])/3600./ketser.mean(axis=0)
ax = bxs[1,0]
ax.bar(modes[:], data[:], log=True)
data = abs(cbtr)/(ketser.mean(axis=0)*1e3)
ax.text(.5, .95, r'$|\Delta K_h|$', ha="center", va="top", transform=ax.transAxes)
ax = bxs[0,1]
ax.bar(modes[:], data[:], log=True)
ax.text(.5, .95, '|C btrop|', ha="center", va="top", transform=ax.transAxes)
data = abs(cbcl)/(ketser.mean(axis=0)*1e3)
ax = bxs[1,1]
ax.bar(modes[:], data[:], log=True)
ax.text(.5, .95, '|C bclin|', ha="center", va="top", transform=ax.transAxes)
for ax in axs:
ax.grid(True)
ax.set_ylim([1e-8, 1e-5])
for ax in bxs[1,:]:
ax.set_xlabel('mode number')
fig.suptitle(r'Full domain, time-averaged terms of modal equation (/$K_h$) [s$^-1$]')
if dosavefig:
fig.savefig(dirpic+"{0}_modeNRJ_termsBal{1}.pdf".format(simul,app), \
magnification="auto", bbox_inches="tight")
# time series of total btrop conversion and btrop & bclin flux divergence
if simul in ['luckyt']:
limyW = [0, 2e-2]
else:
limyW = [0, 7e-3]
nc = Dataset(filscat, "r")
divfbc = nc.variables['divf_full'][:,1:].sum(axis=-1)
divfbt = nc.variables['divf_full'][:,0]
foutbc = nc.variables['divf_out'][:,1:].sum(axis=-1)
foutbt = nc.variables['divf_out'][:,0]
kebcl = nc.variables['KEtser'][:,1:].sum(axis=-1)
pebcl = nc.variables['PEtser'][:,1:].sum(axis=-1)
kebtr = nc.variables['KEtser'][:,0]
pebtr = nc.variables['PEtser'][:,0]
cbtr = nc.variables['Cmn_tser'][0,1:,:].sum(axis=0)
nc.close()
fig, axs = plt.subplots(2, 1, sharex=True)
ax = axs[0]
ax.plot(time, foutbc, label=r"$F_{bc}$ out")
ax.plot(time, divfbc, label=r"$\nabla F_c$")
ax.plot(time, foutbt, "--", label=r"$F_t$ out")
ax.plot(time, divfbt, "--", label=r"$\nabla F_t$")
ax.plot(time, cbtr, "k", label="C_t")
ax.set_ylim(limyW)
ax.legend(ncol=2)
ax = axs[1]
ax.plot(time, kebcl, label=r"$K_c$")
ax.plot(time, pebcl, label=r"$P_c$")
ax.plot(time, kebtr, "--", label=r"$K_t$")
ax.plot(time, pebtr, "--", label=r"$P_t$")
ax.set_ylim([0, 3])
ax.legend(ncol=2)
for ax in axs:
ax.grid(True)
if dosavefig:
fig.savefig(dirpic+"{0}_modeNRJ_evol{1}.pdf".format(simul,app), \
magnification="auto", bbox_inches="tight")
###Output
_____no_output_____
###Markdown
Conversion term Compare linear calculation vs. numerical simulation, modal vs. btrop/bclin decomposition: * linear theory, btrop/bclin from Nycander conv * linear theory, modal decomposition from Falahat & Nycander 2014
###Code
imod = 1
nc = Dataset(filscat, "r")
cbt = nc.variables['Cmn'][0,imod,:,:]*1e3
cbc = nc.variables['Cbcl'][imod,:,:]*1e3
nc.close()
blurit = lambda x: gaussian_filter(x, sigma=5, mode="reflect")
vamp = 10
fig, axs = plt.subplots(1, 2, sharex=True, sharey=True)
axs[0].pcolormesh(blurit(cbt), vmin=-vamp, vmax=vamp, cmap="seismic")
axs[1].pcolormesh(blurit(cbc), vmin=-vamp, vmax=vamp, cmap="seismic")
for ax in axs:
ax.set_aspect(1)
###Output
/home/lahaye/Coding/virtual_envs/py3-jhub/lib/python3.5/site-packages/ipykernel_launcher.py:3: RuntimeWarning: invalid value encountered in greater
This is separate from the ipykernel package so we can avoid doing imports until
/home/lahaye/Coding/virtual_envs/py3-jhub/lib/python3.5/site-packages/ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in greater
after removing the cwd from sys.path.
###Markdown
Dissipation
###Code
nc = Dataset(filscat, "r")
diss = nc.variables['']  # TODO: fill in the name of the dissipation variable to read
###Output
_____no_output_____ |
06_Scikit-HEP_particles-decays-units.ipynb | ###Markdown
Particles, decays, HEP units **Quick intro to the following packages** - `hepunits` - the HEP system of units. - `Particle` - PDG particle data, MC identification codes, and more. - `DecayLanguage` - Decay files (notably for EvtGen), universal description of decay chains. hepunits - The HEP system of units The package ``hepunits`` collects the most commonly used units and constants in the HEP System of Units, which are *not* the same as the international system of units (aka SI units). The HEP system of units is based on the following: | Quantity | Name | Unit || ------------------ :| ----------------- :| -- :|| Length | millimeter | mm || Time | nanosecond | ns || Energy | Mega electron Volt| MeV || Positron charge | eplus | || Temperature | kelvin | K || Amount of substance| mole | mol || Luminous intensity | candela | cd || Plane angle | radian | rad || Solid angle | steradian | sr | Note: no need to make use of sophisticated packages (e.g. as in AstroPy) since we basically never need to change systems of units (we never use ergs as energy, for example ;-)). **Basic usage is straightforward, though it may be confusing at first. Remember, all variables are expressed with respect to these units:**
###Code
from hepunits import mm, ns, MeV, eplus, GeV, kelvin, mol, cd, rad, sr
mm == ns == MeV == eplus == kelvin == mol == cd == rad == sr == 1
GeV == 1000*MeV
###Output
_____no_output_____
###Markdown
Add two quantities with different length units:
###Code
from hepunits import units as u
1*u.meter + 5*u.cm
###Output
_____no_output_____
###Markdown
Indeed, the result is expressed in HEP units, i.e. in mm. To obtain the result in meters instead:
###Code
(1*u.meter + 5*u.cm) / u.meter
###Output
_____no_output_____
###Markdown
Do you need to play a bit more to get a proper feeling? This next (non-academic) exercise should help you ... **Quick time-of-flight study** Let's try to play with units in a meaningful way, in a kind of exercise that physicists encounter. Imagine you are investigating time-of-flight (ToF) detectors for particle identification. The time it takes a particle of velocity $\beta = v/c = pc/E$ to travel a distance $L$ is given by $$\mathrm{ToF} = \frac{L}{c \beta}$$ It follows that the mass $m$ of the particle can be determined from $$m = \frac{p}{c}\sqrt{\frac{c^2 \mathrm{ToF}^2}{L^2}-1}$$ provided the path length and the momentum can be measured, say, by a tracking system. What are typical ToF differences, say, for (charged) kaons and pions? It is practical to perform the calculation as $$\Delta \mathrm{ToF} = \frac{L}{c}\left(\frac{1}{\beta_1} - \frac{1}{\beta_2}\right)\,,$$ with $\frac{1}{\beta} = \sqrt{1+m^2c^2/p^2}$.
###Code
from hepunits import c_light, GeV, meter, ps, ns
import numpy as np
def ToF(m, p, L):
"""Time-of-Flight = particle path length L / (c * beta)"""
# No c factors here because physicists give m and p without them, hence the c's cancel out, effectively ;-).
one_over_beta = np.sqrt(1 + m*m/(p*p))
return (L * one_over_beta /c_light)
###Output
_____no_output_____
###Markdown
For convenience, get hold of the particle data for the proton, $K^+$ and $\pi^+$ (see the `Particle` package further down in this notebook):
###Code
from particle.particle.literals import proton, pi_plus, K_plus # particle name literals
###Output
_____no_output_____
###Markdown
Calculate the difference in ToF between 10 GeV kaons and pions travelling over 10 meters:
###Code
delta = ( ToF(K_plus.mass, 10*GeV, 10*meter) - ToF(pi_plus.mass, 10*GeV, 10*meter) ) / ps
print("At 10 GeV, Delta-TOF(K-pi) over 10 meters = {:.5} ps".format(delta))
###Output
At 10 GeV, Delta-TOF(K-pi) over 10 meters = 37.374 ps
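###Markdown
A quick numerical sanity check of the mass formula quoted above (a minimal sketch; the helper `mass_from_ToF` is defined here only for illustration):
###Code
def mass_from_ToF(tof, p, L):
    # Invert ToF = L/(c*beta): m = p*sqrt((c*ToF/L)**2 - 1), with m and p given without c factors as above
    return p * np.sqrt((c_light * tof / L)**2 - 1)

# Recover the kaon mass from its 10 GeV / 10 m time of flight; the ratio should be ~1
mass_from_ToF(ToF(K_plus.mass, 10*GeV, 10*meter), 10*GeV, 10*meter) / K_plus.mass
###Output
_____no_output_____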
###Markdown
Let's get a bit fancier: - Compare protons, kaons and pions for a 1-meter path length. - Look at the ToF difference versus momentum. Other plotting tools (from HEP, actually) will be presented later on. For now let's just use the standard `matplotlib` library.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
p = np.arange(0.5, 5.1, 0.05) * GeV
# Calculate all the delta-ToF in picoseconds
delta1 = ( ToF(K_plus.mass, p, 1.*meter) - ToF(pi_plus.mass, p, 1.*meter) ) / ps
delta2 = ( ToF(proton.mass, p, 1.*meter) - ToF(K_plus.mass, p, 1.*meter) ) / ps
delta3 = ( ToF(proton.mass, p, 1.*meter) - ToF(pi_plus.mass, p, 1.*meter) ) / ps
fig, ax = plt.subplots()
ax.plot(p/GeV, delta1, label='K-$\pi$')
ax.plot(p/GeV, delta2, label='p-K')
ax.plot(p/GeV, delta3, label='p-$\pi$')
ax.set(xlabel='p [GeV]', ylabel='$\Delta$ ToF [ps]',
title='Time-of-flight difference for a 1-meter path')
ax.grid()
plt.legend()
plt.ylim(bottom=0, top=500)
plt.show()
###Output
_____no_output_____
###Markdown
PDG particle data, MC identification codes **Pythonic interface to** - Particle Data Group (PDG) particle data table. - Particle MC identification codes, with inter-MC converters. - With various extra goodies. Package motivation - particle data - The [PDG](http://pdg.lbl.gov/) provides a downloadable table of particle masses, widths, charges and Monte Carlo particle ID numbers (PDG IDs). - Most recent file [here](http://pdg.lbl.gov/2020/html/computer_read.html). - It also provided an experimental file with extended information (spin, quark content, P and C parities, etc.) until 2008 only, see [here](http://pdg.lbl.gov/2008/html/computer_read.html) (not widely known!). - But anyone wanting to use these data, the only ones readily available, has to parse the file programmatically. - Why not make a Python package to deal with all these data, for everyone? Package motivation - MC identification codes - The C++ HepPID and HepPDT libraries provide functions for processing particle ID codes in the standard particle (aka PDG) numbering scheme. - Different event generators may have their separate set of particle IDs: Geant3, etc. - Again, why not make a package providing all functionality/conversions, Python-ically, for everyone? Package, in short - Particle - loads extended PDG data tables and implements search and manipulations / display. - PDGID - find out as much as possible from the PDG ID number. No table lookup. - Converters for MC IDs used in Pythia and Geant. - Basic usage via the command line. - Flexible / advanced usage programmatically. **1. `PDGID` class and MC ID classes** - Classes `PDGID`, `PythiaID`, `Geant3ID`. - Converters in module `particle.converters`: `Geant2PDGIDBiMap`, etc. PDG IDs module overview - Process and query PDG IDs, and more – no look-up table needed. - The current version of the package reflects the latest version of the HepPID & HepPDT utility functions defined in the C++ HepPID and HepPDT versions 3.04.01. - It contains more functionality than that available in the C++ code … and minor fixes too. - Definition of a PDGID class, PDG ID literals, and a set of standalone HepPID functions to query PDG IDs (is_meson, has_bottom, j_spin, charge, etc.). - All PDGID class functions are available standalone. PDGID class - Wrapper class `PDGID` for PDG IDs. - Behaves like an int, with extra goodies. - Large spectrum of properties and methods, with a Pythonic interface, and yet more!
###Code
from particle import PDGID
pid = PDGID(211)
pid
PDGID(99999999)
from particle.pdgid import is_meson
pid.is_meson, is_meson(pid)
###Output
_____no_output_____
###Markdown
To print all `PDGID` properties:
###Code
print(pid.info())
###Output
A None
J 0.0
L 0
S 0
Z None
abspid 211
charge 1.0
has_bottom False
has_charm False
has_down True
has_fundamental_anti False
has_strange False
has_top False
has_up True
is_Qball False
is_Rhadron False
is_SUSY False
is_baryon False
is_composite_quark_or_lepton False
is_diquark False
is_dyon False
is_gauge_boson_or_higgs False
is_generator_specific False
is_hadron True
is_lepton False
is_meson True
is_nucleus False
is_pentaquark False
is_quark False
is_sm_gauge_boson_or_higgs False
is_special_particle False
is_technicolor False
is_valid True
j_spin 1
l_spin 1
s_spin 1
three_charge 3
###Markdown
MC ID classes and converters - Classes for MC IDs used in Pythia and Geant3: `PythiaID` and `Geant3ID`. - ID converters in module `particle.converters`: `Geant2PDGIDBiMap`, etc.
###Code
from particle import PythiaID, Geant3ID
pyid = PythiaID(10221)
pyid.to_pdgid()
###Output
_____no_output_____
###Markdown
Conversions are directly available via mapping classes.E.g., bi-directional map Pythia ID - PDG ID:
###Code
from particle.converters import Pythia2PDGIDBiMap
Pythia2PDGIDBiMap[PDGID(9010221)]
Pythia2PDGIDBiMap[PythiaID(10221)]
###Output
_____no_output_____
###Markdown
**2. `Particle` class** There are various ways to create a particle. The most commonly used method is via its PDG ID.
###Code
from particle import Particle
Particle.from_pdgid(211)
###Output
_____no_output_____
###Markdown
**Searching**
Simple and natural API to deal with the PDG particle data table, with powerful 1-line search and look-up utilities!
- `Particle.find(…)` – search a single match (exception raised if multiple particles match the search specifications).
- `Particle.findall(…)` – search a list of candidates.
- Search methods that can query any particle property!
###Code
Particle.find('J/psi')
###Output
_____no_output_____
###Markdown
You can specify search terms as keywords - _any particle property_:
###Code
Particle.find(latex_name=r'\phi(1020)')
###Output
_____no_output_____
###Markdown
You can directly check the numeric charge:
###Code
Particle.findall('pi', charge=-1)
###Output
_____no_output_____
###Markdown
Or use a **lambda function** for the ultimate in generality! For example, to find all the neutral particles containing a bottom quark, with mass between 5.2 and 5.3 GeV:
###Code
from hepunits import GeV, s # Units are good. Use them.
Particle.findall(lambda p:
p.pdgid.has_bottom
and p.charge==0
and 5.2*GeV < p.mass < 5.3*GeV
)
###Output
_____no_output_____
###Markdown
Another lambda function example: You can use the width or the lifetime:
###Code
Particle.findall(lambda p: p.lifetime > 1000*s)
###Output
_____no_output_____
###Markdown
If you want infinite lifetime, you could just use the keyword search instead:
###Code
Particle.findall(lifetime=float('inf'))
###Output
_____no_output_____
###Markdown
Trivially find all pseudoscalar charm mesons:
###Code
from particle import SpinType
Particle.findall(lambda p: p.pdgid.is_meson and p.pdgid.has_charm and p.spin_type==SpinType.PseudoScalar)
###Output
_____no_output_____
###Markdown
**Display** Nice display in Jupyter notebooks, as well as `str` and `repr` support:
###Code
p = Particle.from_pdgid(-415)
p
print(p)
print(repr(p))
###Output
<Particle: name="D(2)*(2460)-", pdgid=-415, mass=2465.4 ± 1.3 MeV>
###Markdown
Full descriptions:
###Code
print(p.describe())
###Output
Name: D(2)*(2460)- ID: -415 Latex: $D_{2}^{*}(2460)^{-}$
Mass = 2465.4 ± 1.3 MeV
Width = 46.7 ± 1.2 MeV
Q (charge) = - J (total angular) = 2.0 P (space parity) = +
C (charge parity) = None I (isospin) = 0.5 G (G-parity) = None
SpinType: SpinType.Tensor
Quarks: Cd
Antiparticle name: D(2)*(2460)+ (antiparticle status: ChargeInv)
###Markdown
You may find LaTeX or HTML to be more useful in your program; both are supported:
###Code
print(p.latex_name, '\n', p.html_name)
###Output
D_{2}^{*}(2460)^{-}
D<SUB>2</SUB><SUP>*</SUP>(2460)<SUP>-</SUP>
###Markdown
**Particle properties** You can do things to particles, like **invert** them:
###Code
~p
###Output
_____no_output_____
###Markdown
There are a plethora of properties you can access:
###Code
p.spin_type
###Output
_____no_output_____
###Markdown
You can quickly access the PDGID of a particle:
###Code
p.pdgid
###Output
_____no_output_____
###Markdown
**3. Literals**
They provide a handy way to manipulate things with human-readable names! `Particle` defines literals for most common particles, with easily recognisable names.
- Literals are dynamically generated on import for both `PDGID` and `Particle` classes.

**PDGID literals**
###Code
from particle.pdgid import literals as lid
lid.phi_1020
###Output
_____no_output_____
###Markdown
**Particle literals**
###Code
from particle import literals as lp
lp.phi_1020
###Output
_____no_output_____
###Markdown
**4. Data files, stored in `particle/data/`**
- PDG particle data files
  - Original PDG data files, which are in a fixed-width format - simply for bookkeeping and reference.
  - Code rather uses “digested forms” of these, produced within `Particle`, stored as CSV, for optimised querying.
  - Latest PDG data (2020) used by default.
  - Advanced usage: user can load older PDG tables, load a “user table” with new particles, append to the default table.
- Other data files
  - CSV file for mapping of PDG IDs to particle LaTeX names.

**Dump table contents**
The package provides the 2 methods `Particle.to_dict(...)` and `Particle.to_list(...)`, which make it easy to dump (selected) particle properties. No need to dig into the package installation directory to inspect the particle data table ;-). Tabular output can be formatted with the powerful package `tabulate`, for example (other similar libraries exist).
###Code
help(Particle.to_dict)
from tabulate import tabulate
fields = ['pdgid', 'pdg_name', 'mass', 'mass_upper', 'mass_lower', 'three_charge']
query_as_dict = Particle.to_dict(exclusive_fields=fields, n_rows=10)
print(tabulate(query_as_dict, headers='keys'))
###Output
pdgid pdg_name mass mass_upper mass_lower three_charge
------- ---------- ------- ------------ ------------ --------------
1 d 4.67 0.5 0.2 -1
-1 d 4.67 0.5 0.2 1
2 u 2.16 0.5 0.3 2
-2 u 2.16 0.5 0.3 -2
3 s 93 11 5 -1
-3 s 93 11 5 1
4 c 1270 20 20 2
-4 c 1270 20 20 -2
5 b 4180 30 20 -1
-5 b 4180 30 20 1
###Markdown
Be fancy - table with all pseudoscalar charm hadrons, in _reStructuredText_ format:
###Code
fields = ['pdgid', 'name', 'evtgen_name', 'mass', 'mass_upper', 'mass_lower', 'three_charge']
query_as_dict = Particle.to_dict(filter_fn=lambda p: p.pdgid.is_meson and p.pdgid.has_charm and p.spin_type==SpinType.PseudoScalar,
exclusive_fields=fields)
print(tabulate(query_as_dict, headers='keys', tablefmt='rst'))
###Output
======= ========== ============= ======= ============ ============ ==============
pdgid name evtgen_name mass mass_upper mass_lower three_charge
======= ========== ============= ======= ============ ============ ==============
411 D+ D+ 1869.65 0.05 0.05 3
-411 D- D- 1869.65 0.05 0.05 -3
421 D0 D0 1864.83 0.05 0.05 0
-421 D~0 anti-D0 1864.83 0.05 0.05 0
431 D(s)+ D_s+ 1968.34 0.07 0.07 3
-431 D(s)- D_s- 1968.34 0.07 0.07 -3
441 eta(c)(1S) eta_c 2983.9 0.5 0.5 0
541 B(c)+ B_c+ 6274.9 0.8 0.8 3
-541 B(c)- B_c- 6274.9 0.8 0.8 -3
100441 eta(c)(2S) eta_c(2S) 3637.5 1.1 1.1 0
======= ========== ============= ======= ============ ============ ==============
###Markdown
Notebook-friendly HTML is just as easy:
###Code
from IPython.display import HTML
query_as_dict = Particle.to_dict(filter_fn=lambda p: p.pdgid.is_meson and p.pdgid.has_charm and p.spin_type==SpinType.PseudoScalar,
exclusive_fields=['pdgid', 'pdg_name', 'html_name'])
HTML(tabulate(query_as_dict, headers='keys', tablefmt='html'))
###Output
_____no_output_____
###Markdown
**5. Advanced usage**
You can:
* Extend or replace the default particle data table in `Particle`.
* Adjust properties for a particle.
* Make custom particles.

Decay files, universal description of decay chains
`DecayLanguage` is designed for the manipulation of decay structures in Python. The current package has:
- Decay file parsers:
  - Read *.dec DecFiles*, such as EvtGen decay files typically used in Flavour Physics experiments.
  - Manipulate and visualise them in Python.
- Amplitude Analysis decay language:
  - Input based on AmpGen generator, output format for GooFit C++ program.

Package motivation
- Ability to describe decay-tree-like structures.
- Provide a translation of decay amplitude models from AmpGen to GooFit.
  - Idea is to generalise this to other decay descriptions.
- Any experiment uses event generators which, among many things, need to describe particle decay chains.
- Programs such as EvtGen rely on so-called .dec decay files.
- Many experiments need decay data files.
- Why not make a Python package to deal with decay files, for everyone?

Package, in short
- Tools to parse decay files and programmatically manipulate them, query, display information.
  - Descriptions and parsing built atop the [Lark parser](https://github.com/lark-parser/lark/).
- Tools to translate decay amplitude models from AmpGen to GooFit, and manipulate them.

**1. Decay files**

"Master file" DECAY.DEC
Gigantic file defining decay modes for all relevant particles, including decay model specifications. LHCb uses one. Belle II as well, and others.

User .dec files
- Needed to produce specific MC samples.
- Typically contain a single decay chain (except if defining inclusive samples).

**Example user decay file:**

    # Decay file for [B_c+ -> (B_s0 -> K+ K-) pi+]cc
    Alias B_c+sig B_c+
    Alias B_c-sig B_c-
    ChargeConj B_c+sig B_c-sig
    Alias MyB_s0 B_s0
    Alias Myanti-B_s0 anti-B_s0
    ChargeConj MyB_s0 Myanti-B_s0

    Decay B_c+sig
      1.000 MyB_s0 pi+ PHOTOS PHSP;
    Enddecay
    CDecay B_c-sig

    Decay MyB_s0
      1.000 K+ K- SSD_CP 20.e12 0.1 1.0 0.04 9.6 -0.8 8.4 -0.6;
    Enddecay
    CDecay Myanti-B_s0

**2. Decay file parsing**
- **Parsing should be simple**
  - Expert users can configure parser choice and settings, etc.
- **Parsing should be (reasonably) fast!**

After parsing, many queries are possible!
###Code
from decaylanguage import DecFileParser
###Output
_____no_output_____
###Markdown
The LHCb "master decay file" It's a big file! ~ 500 particle decays defined, thousands of decay modes, over 11k lines in total.
###Code
dfp = DecFileParser('data/DECAY_LHCB.DEC')
%%time
dfp.parse()
dfp
###Output
_____no_output_____
###Markdown
Let's parse and play with a small decay file:
###Code
with open('data/Dst.dec') as f:
print(f.read())
dfp_Dst = DecFileParser('data/Dst.dec')
dfp_Dst
dfp_Dst.parse()
dfp_Dst
###Output
_____no_output_____
###Markdown
It can be handy to **parse from a multi-line string** rather than a file:
###Code
s = """
# Decay file for [B_c+ -> (B_s0 -> K+ K-) pi+]cc
Alias B_c+sig B_c+
Alias B_c-sig B_c-
ChargeConj B_c+sig B_c-sig
Alias MyB_s0 B_s0
Alias Myanti-B_s0 anti-B_s0
ChargeConj MyB_s0 Myanti-B_s0
Decay B_c+sig
1.000 MyB_s0 pi+ PHOTOS PHSP;
Enddecay
CDecay B_c-sig
Decay MyB_s0
1.000 K+ K- SSD_CP 20.e12 0.1 1.0 0.04 9.6 -0.8 8.4 -0.6;
Enddecay
CDecay Myanti-B_s0
"""
dfp = DecFileParser.from_string(s)
dfp.parse()
dfp
###Output
_____no_output_____
###Markdown
Decay file information
###Code
dfp_Dst.print_decay_modes('D*+')
dfp_Dst.list_decay_mother_names()
dfp_Dst.list_decay_modes('D*+')
###Output
_____no_output_____
###Markdown
Info such as particle aliases
###Code
dfp.dict_aliases()
dfp.dict_charge_conjugates()
###Output
_____no_output_____
###Markdown
**3. Display of decay chains** The parser can provide a simple `dict` representation of any decay chain found in the input decay file(s). Being generic and simple, that is what is used as input information for the viewer class (see below).
###Code
dc = dfp_Dst.build_decay_chains('D+')
dc
from decaylanguage import DecayChainViewer
DecayChainViewer(dc)
DecayChainViewer(dfp_Dst.build_decay_chains('D*+'))
dc = dfp_Dst.build_decay_chains('D*+', stable_particles=['D+', 'D0', 'pi0'])
DecayChainViewer(dc)
###Output
_____no_output_____
###Markdown
**Charge conjugation**
###Code
dc_cc = dfp_Dst.build_decay_chains('D*-', stable_particles=['D-', 'anti-D0', 'pi0'])
DecayChainViewer(dc_cc)
###Output
_____no_output_____
###Markdown
**Parsing several files** Typically useful when the user decay file needs information from the master decay file.
###Code
s = u"""
Alias MyXic+ Xi_c+
Alias MyantiXic- anti-Xi_c-
ChargeConj MyXic+ MyantiXic-
Decay Xi_cc+sig
1.000 MyXic+ pi- pi+ PHSP;
Enddecay
CDecay anti-Xi_cc-sig
Decay MyXic+
1.000 p+ K- pi+ PHSP;
Enddecay
CDecay MyantiXic-
End
"""
dfp = DecFileParser.from_string(s)
dfp.parse()
dfp
###Output
C:\home\sw\Anaconda3\lib\site-packages\decaylanguage\dec\dec.py:447: UserWarning:
Corresponding 'Decay' statement for 'CDecay' statement(s) of following particle(s) not found:
anti-Xi_cc-sig.
Skipping creation of these charge-conjugate decay trees.
warnings.warn(msg)
###Markdown
Note the subtlety: 3, not 4, decays are found! This is because the file contains no statement `ChargeConj anti-Xi_cc-sig Xi_cc+sig`, hence the parser cannot know to which particle (matching `Decay` statement) the charge-conjugate decay of `anti-Xi_cc-sig` relates (the code does not rely on the position of statements to guess ;-)).
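One way to supply the missing link is to add the `ChargeConj` statement to the decay string before parsing. A minimal sketch (not executed in this notebook), reusing the string `s` defined above:
```python
s_fixed = s.replace(
    "ChargeConj MyXic+ MyantiXic-",
    "ChargeConj MyXic+ MyantiXic-\nChargeConj anti-Xi_cc-sig Xi_cc+sig",
)
dfp_fixed = DecFileParser.from_string(s_fixed)
dfp_fixed.parse()  # the anti-Xi_cc-sig decay tree should now be built as well
```
The notebook instead takes another route below: parsing several files, so that the missing information comes from the master decay file.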
###Code
d = dfp.build_decay_chains('Xi_cc+sig')
DecayChainViewer(d)
###Output
_____no_output_____
###Markdown
As said in the warning, the information provided is not enough for the anti-Xi_cc-sig to make sense:
###Code
from decaylanguage.dec.dec import DecayNotFound
try:
d = dfp.build_decay_chains('anti-Xi_cc-sig')
except DecayNotFound:
print("Decays of particle 'anti-Xi_cc-sig' not found in .dec file!")
###Output
Decays of particle 'anti-Xi_cc-sig' not found in .dec file!
###Markdown
But the missing information is easily provided by **parsing two files simultaneously ...!** (Any number of files is allowed.)
###Code
from tempfile import NamedTemporaryFile
with NamedTemporaryFile(delete=False) as tf:
tf.write(s.encode('utf-8'))
dfp = DecFileParser(tf.name, 'data/DECAY_LHCB.DEC')
dfp.parse()
dc = dfp.build_decay_chains('Xi_cc+sig')
DecayChainViewer(dc)
dc_cc = dfp.build_decay_chains('anti-Xi_cc-sig')
DecayChainViewer(dc_cc)
###Output
_____no_output_____
###Markdown
Want to save a graph? Try for example
```python
dcv = DecayChainViewer(...)
dcv.graph.write_pdf('test.pdf')
```

**4. Representation of decay chains**
The universal (and digital) representation of decay chains is of interest well outside the context of decay file parsing!

Building blocks
- A daughters list - list of final-state particles.
- A decay mode - typically a branching fraction and a list of final-state particles (may also contain _any_ metadata such as decay model and optional decay-model parameters, as defined for example in .dec decay files).
- A decay chain - can be seen as a mother particle and a list of decay modes.
###Code
from decaylanguage.decay.decay import DaughtersDict, DecayMode, DecayChain
###Output
_____no_output_____
###Markdown
**Daughters list** (actually a ``Counter`` dictionary, internally):
###Code
# Constructor from a dictionary
dd = DaughtersDict({'K+': 1, 'K-': 2, 'pi+': 1, 'pi0': 1})
# Constructor from a list of particle names
dd = DaughtersDict(['K+', 'K-', 'K-', 'pi+', 'pi0'])
# Constructor from a string representing the final state
dd = DaughtersDict('K+ K- pi0')
dd
###Output
_____no_output_____
###Markdown
Decay Modes
###Code
# A 'default' and hence empty, decay mode
dm = DecayMode()
# Decay mode with minimal input information
dd = DaughtersDict('K+ K-')
dm = DecayMode(0.5, dd)
# Decay mode with decay model information and user metadata
dm = DecayMode(0.2551, # branching fraction
'pi- pi0 nu_tau', # final-state particles
model='TAUHADNU', # decay model
model_params=[-0.108, 0.775, 0.149, 1.364, 0.400], # decay-model parameters
study='toy', year=2019 # user metadata
)
dm
print(dm.describe())
###Output
Daughters: pi- pi0 nu_tau , BF: 0.2551
Decay model: TAUHADNU [-0.108, 0.775, 0.149, 1.364, 0.4]
Extra info:
study: toy
year: 2019
###Markdown
Various manipulations are available:
###Code
dm = DecayMode.from_pdgids(0.5, [321, -321])
print(dm)
dm = DecayMode(1.0, 'K+ K+ pi-')
dm.charge_conjugate()
###Output
<DecayMode: daughters=K+ K-, BF=0.5>
###Markdown
Decay chains
###Code
dm1 = DecayMode(0.0124, 'K_S0 pi0', model='PHSP')
dm2 = DecayMode(0.692, 'pi+ pi-')
dm3 = DecayMode(0.98823, 'gamma gamma')
dc = DecayChain('D0', {'D0':dm1, 'K_S0':dm2, 'pi0':dm3})
dc
dc.decays
###Output
_____no_output_____
###Markdown
Flatten the decay chain, i.e. replace all intermediate decaying particles with their final states:
- The BF is now the *visible BF*
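For the chain defined above, for example, the visible BF is the product of the branching fractions along the chain: 0.0124 × 0.692 × 0.98823 ≈ 0.0085.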
###Code
dc.flatten()
###Output
_____no_output_____
###Markdown
Of course you can still just as easily visualise decays defined via this `DecayChain` class:
###Code
DecayChainViewer(dc.to_dict())
###Output
_____no_output_____ |
Quantitative Finance Lectures/lecture02/fama_french_portfolios/double_sort.ipynb | ###Markdown
Double-sorted Fama-French portfolios
Author: Prof. Gustavo Soares
Imports
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
tr_df = pd.read_csv('tr_df.csv', index_col=[0,1]).iloc[:,0].unstack().T.astype(float)
tr_df.index = pd.to_datetime(tr_df.index)
ibov_composition = pd.read_csv('ibov_composition.csv', index_col=[0,1])['Weights'].astype(float)
ibov_composition.index = pd.MultiIndex.from_tuples([(x, pd.to_datetime(d)) for x,d in ibov_composition.index])
ibov_composition = ibov_composition.unstack().T.fillna(0)
df = pd.read_csv('IBOV_time_series.csv', index_col=0).astype(float)
df.index = pd.to_datetime(df.index)
tr_df.tail()
ibov_composition.tail()
###Output
_____no_output_____
###Markdown
Momentum and low volatility signals Let's now calculate momentum and low-volatility signals for every stock at every month end.
###Code
k = 12 # months
# 12-1 momentum: skip the most recent month (21 trading days), then take the total return
# over the preceding months (21 trading days per month)
mom_signals = tr_df.shift(21).pct_change(k * 21 - 21).dropna()
vol_signals = (np.log(tr_df).diff(1).rolling(252).std() * np.sqrt(252)).shift(1).dropna()
###Output
_____no_output_____
###Markdown
Momentum portfolios Let's now pick the top (20%) and bottom (20%) stocks according to our momentum signal:
###Code
month_ends = [tr_df.index[i-1] for i in range(1,tr_df.shape[0]) if tr_df.index[i-1].month != tr_df.index[i].month]
month_ends = [eom for eom in month_ends if eom>mom_signals.index.min() and eom>vol_signals.index.min()]
mom_portfolios = {}
for eom in month_ends:
stocks_on_date = mom_signals.loc[eom][mom_signals.loc[eom].abs()>0.0003].dropna().rank()
n = stocks_on_date.shape[0]
date_port = {
'top' : list(stocks_on_date[stocks_on_date>n-n/5].index),
'bottom' : list(stocks_on_date[stocks_on_date<n/5].index)
}
mom_portfolios[eom] = date_port
###Output
_____no_output_____
###Markdown
Low vol portfolios Let's now pick the lowest (20%) and highest (20%) volatility stocks:
###Code
vol_portfolios = {}
for eom in month_ends:
stocks_on_date = vol_signals.loc[eom][vol_signals.loc[eom].abs()>0.0003].dropna().rank(ascending=False)
n = stocks_on_date.shape[0]
date_port = {
'top' : list(stocks_on_date[stocks_on_date>n-n/5].index),
'bottom' : list(stocks_on_date[stocks_on_date<n/5].index)
}
vol_portfolios[eom] = date_port
###Output
_____no_output_____
###Markdown
Momentum and Low vol intersection portfolios
###Code
def intersection(lst1, lst2):
return list(set(lst1) & set(lst2))
combo_port = {}
for eom in list(set(mom_portfolios.keys()) & set(vol_portfolios.keys())):
date_port = {}
top_stocks = intersection(mom_portfolios[eom]['top'], vol_portfolios[eom]['top'])
bot_stocks = intersection(mom_portfolios[eom]['bottom'], vol_portfolios[eom]['bottom'])
if len(top_stocks) == 0:
print(f"No stocks for top on {eom}")
if len(bot_stocks) == 0:
print(f"No stocks for bottom on {eom}")
date_port['top'] = top_stocks
date_port['bottom'] = bot_stocks
combo_port[eom] = date_port
def weights_on_date(d, port_name):
    # require at least one stock selected for that date and leg
    if d in combo_port.keys() and len(combo_port[d][port_name]) > 0:
w = ibov_composition.loc[d, combo_port[d][port_name]]
w = w/w.sum()
else:
w = None
return w
def quant_on_date(d1, d0, port_name, notional):
w = weights_on_date(d0, port_name)
if w is not None:
prices = tr_df.loc[d0, w.index]
q = notional * w / prices
else:
q = None
return q
calendar = [d for d in tr_df.index.unique() if d>=month_ends[0]]
backtests = pd.DataFrame(index=calendar, columns=['top', 'bottom'])
backtests.iloc[0] = 100
quant = {
'q_top' : quant_on_date(month_ends[0], month_ends[0], 'top', backtests.iloc[0,0]),
'q_bottom' : quant_on_date(month_ends[0], month_ends[0], 'bottom', backtests.iloc[0,1]),
}
for tdy, yst in zip(calendar[1:], calendar[:-1]):
# calculate pnl of the top stocks
p1top = tr_df.loc[tdy, quant['q_top'].index]
p0top = tr_df.loc[yst, quant['q_top'].index]
toppnl = (quant['q_top'] * (p1top - p0top)).sum()
backtests.loc[tdy, 'top'] = backtests.loc[yst, 'top'] + toppnl
# calculate pnl of the bottom stocks
p1bot = tr_df.loc[tdy, quant['q_bottom'].index]
p0bot = tr_df.loc[yst, quant['q_bottom'].index]
botpnl = (quant['q_bottom'] * (p1bot - p0bot)).sum()
backtests.loc[tdy, 'bottom'] = backtests.loc[yst, 'bottom'] + botpnl
if yst in combo_port.keys(): # rebalance the portfolio
qt = quant_on_date(tdy, yst, 'top', backtests.loc[tdy, 'top'])
qb = quant_on_date(tdy, yst, 'bottom', backtests.loc[tdy, 'bottom'])
if qt is not None:
quant['q_top'] = qt.fillna(0)
if qb is not None:
quant['q_bottom'] = qb.fillna(0)
backtests.head()
backtests.plot(figsize=(15,10), fontsize=16)
plt.title('Top and bottom portfolios', fontsize=20)
plt.legend(fontsize=20)
plt.show()
###Output
_____no_output_____
###Markdown
Low vol with Momentum portfolios Let's now choose positive momentum stocks among those with low vol:
###Code
vol_then_mom_portfolios = {}
for eom in month_ends:
stocks_on_date = vol_signals.loc[eom][vol_signals.loc[eom].abs()>0.0003].dropna().rank(ascending=False)
n = stocks_on_date.shape[0]
low_vol_stocks = list(stocks_on_date[stocks_on_date>n-n/5].index)
stocks_on_date = mom_signals.loc[eom, low_vol_stocks]
stocks_on_date = stocks_on_date[stocks_on_date.abs()>0.0003].dropna().rank()
n = stocks_on_date.shape[0]
low_vol_and_mom_stocks = list(stocks_on_date[stocks_on_date>n-n/5].index)
vol_then_mom_portfolios[eom] = low_vol_and_mom_stocks
calendar = [d for d in tr_df.index.unique() if d>=month_ends[0]]
double_sort_backtest = pd.Series(index=calendar)
double_sort_backtest.iloc[0] = 100
d = month_ends[0]
w = ibov_composition.loc[d, vol_then_mom_portfolios[d]]
w = w/w.sum()
ref_date = max([x for x in tr_df.index.unique() if x < d])
q = double_sort_backtest.iloc[0] * w / tr_df.loc[d, w.index]
for tdy, yst in zip(calendar[1:], calendar[:-1]):
# calculate pnl of the top stocks
p1 = tr_df.loc[tdy, q.index]
p0 = tr_df.loc[yst, q.index]
toppnl = (q * (p1 - p0)).sum()
double_sort_backtest.loc[tdy] = double_sort_backtest.loc[yst] + toppnl
if yst in vol_then_mom_portfolios.keys(): # rebalance the portfolio
        w = ibov_composition.loc[yst, vol_then_mom_portfolios[yst]]  # weights as of the rebalance date
w = w/w.sum()
q = double_sort_backtest.loc[tdy] * w / tr_df.loc[yst, w.index]
double_sort_backtest.head()
df2 = pd.concat([double_sort_backtest.to_frame('double_sort'), df], axis=1, sort=True).dropna().drop('CDI', 1).astype(float)
df2 = np.exp(np.log(df2).diff(1).fillna(0).cumsum())
df2.plot(figsize=(15,10), fontsize=16)
plt.title('Low vol stocks with positive momentum vs. Ibovespa', fontsize=20)
plt.legend(fontsize=20)
plt.show()
###Output
_____no_output_____ |
neural_networks/mnist_digits_denoise.ipynb | ###Markdown
Define wrapper functions
###Code
# Setup assumed here: the notebook's original import/setup cell is not included in this
# dump, so the imports below (TensorFlow 1.x API) and the MNIST loader are added for
# completeness.
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
def weight_variable(shape):
""" Create TensorFlow weight with initial noise. """
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
""" Create TensorFlow bias with initial value of 0.1. """
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
""" 2D TensorFlow convolution with stride of 1 and zero padding. """
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
""" TensorFlow max pooling over 2x2 blocks. """
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
padding='SAME')
def imshow(img, scale=None):
# Assign default scale
if not scale:
scale = [np.min(img), np.max(img)]
# Set up image plot
plt.imshow(img, cmap='gray', vmin=scale[0], vmax=scale[1])
plt.xticks([]), plt.yticks([])
# Show
plt.show()
###Output
_____no_output_____
###Markdown
Construct network
###Code
# Define variables
x = tf.placeholder(tf.float32, shape=[None, 784])
y_ = tf.placeholder(tf.float32, shape=[None, 784])
## Layer 1 (Convolutional)
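# Note: the whole "network" is this single 5x5 convolution with one input and one output
# channel, no bias and no nonlinearity, so training it learns a linear denoising kernel.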
# Create weights
W_conv1 = weight_variable([5, 5, 1, 1])
# Reshape image
# -1 allows tf.reshape to infer that dimension
x_image = tf.reshape(x, [-1, 28, 28, 1])
# Convolve image
h_conv1 = conv2d(x_image, W_conv1)
## Readout layer
# Compute output
y_conv = tf.reshape(h_conv1, [-1, 784])
###Output
_____no_output_____
###Markdown
Train network
###Code
# Build training function
cross_entropy = tf.reduce_mean(tf.square(y_ - y_conv))
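# (Despite the variable name, this is a mean-squared reconstruction error, which suits the denoising objective.)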
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
# Build accuracy measure
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
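# Note: this argmax-based accuracy is never evaluated in the training loop below; the loss above is reported instead.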
# Create saver
saver = tf.train.Saver()
with tf.Session() as sess:
# Initialize variables
sess.run(tf.global_variables_initializer())
# Iterate through
for i in range(20000):
# Import next batch
# batch = (data, labels)
# data: 50 x 784, labels: 50 x 784
batch = mnist.train.next_batch(50)
noise_batch = batch[0] + 0.5*np.random.rand(*batch[0].shape)
# Report iteration stats
if i % 1000 == 0:
train_accuracy = cross_entropy.eval(feed_dict={x: noise_batch,
y_: batch[0]})
print('step {:5d}, current score {:g}'.format(
i, train_accuracy))
# Train network
train_step.run(feed_dict={x: noise_batch, y_: batch[0]})
# Save result
saver.save(sess, "checkpoints/mnist_digits_denoise.ckpt")
result_kernel = np.squeeze(np.array(W_conv1.eval()))
###Output
step 0, current score 0.0965327
step 1000, current score 0.041716
step 2000, current score 0.0288027
step 3000, current score 0.0254543
step 4000, current score 0.0238367
step 5000, current score 0.0240528
step 6000, current score 0.0244158
step 7000, current score 0.0227147
step 8000, current score 0.0235703
step 9000, current score 0.0226999
step 10000, current score 0.0236743
step 11000, current score 0.0248221
step 12000, current score 0.023135
step 13000, current score 0.0238586
step 14000, current score 0.0237676
step 15000, current score 0.0234607
step 16000, current score 0.0234187
step 17000, current score 0.0235196
step 18000, current score 0.0237018
step 19000, current score 0.0232255
###Markdown
Visualize Result
###Code
imshow(result_kernel)
###Output
_____no_output_____ |
experiments/NEL/NEL_elasticsearch/train classifier - ranking.ipynb | ###Markdown
1. Analysing annotations
###Code
print(f"Total number of true annotations: {sum(df_annotated.link_correct)}")
# print(f"Number of entity mentions with at least one true annotation: {(df_annotated.groupby(['item_uri', 'ent_text']).sum() > 0)['link_correct'].sum()}")
###Output
Total number of true annotations: 51
###Markdown
Entity type
Is entity type a good predictor of a match? -> **yes**
###Code
sns.countplot(y="link_correct", hue="_type_match", data=df_annotated);
###Output
_____no_output_____
###Markdown
1.1. Planning a Baseline predictor
1. Use **text similarity only** as a predictor of whether an entity mention maps to an SMG record.
###Code
g = sns.boxplot(data=df_annotated, x="_ent_candidate_sorted_similarity", y="link_correct", orient='h', showfliers=False)
g.set_title("Entity mentions and candidate titles are generally more similar for true matches");
###Output
_____no_output_____
###Markdown
2. Use **text similarity and matched type** as a predictor of whether an entity mention maps to an SMG record.
###Code
g = sns.boxplot(data=df_annotated, x="_ent_candidate_sorted_similarity", y="link_correct", hue="_type_match", orient='h', showfliers=False)
g.set_title("The difference is more apparent for correct links when matched types are taken into consideration");
g = sns.FacetGrid(df_annotated, col="ent_label")
g.map_dataframe(sns.boxplot, data=df_annotated, x="_ent_candidate_sorted_similarity", y="link_correct", hue="_type_match", orient='h', showfliers=False)
###Output
_____no_output_____
###Markdown
2. Building a Baseline Predictor
- assume an entity mention and record are linked if `fuzz.token_sort_ratio(ent_mention, record_title)` is greater than a threshold, and the predicted entity mention type is the same as the record type
- set this threshold initially to 0.8 based on the above plot and then tune it for accuracy
###Code
threshold = 0.8
sim_metric_col = "_ent_candidate_sorted_similarity"
df_annotated['baseline_prediction'] = (df_annotated[sim_metric_col] >= threshold) & (df_annotated["_type_match"])
def calc_metrics(data, gt_col, pred_col):
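    """Compute F1, precision, recall and accuracy from boolean ground-truth and prediction columns."""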
tp = len(data[data[gt_col] & data[pred_col]])
fp = len(data[~data[gt_col] & data[pred_col]])
tn = len(data[~data[gt_col] & ~data[pred_col]])
fn = len(data[data[gt_col] & ~data[pred_col]])
acc = (tp + tn) / (tp + fp + tn + fn)
pr = tp / (tp + fp)
re = tp / (tp + fn)
f1 = (2 * pr * re) / (pr + re)
return {"f1": f1, "precision": pr, "recall": re, "accuracy": acc}
print("OVERALL")
print(calc_metrics(df_annotated, 'link_correct', 'baseline_prediction'))
for t in ["PERSON", "OBJECT", "ORGANISATION"]:
print(t)
print(calc_metrics(df_annotated.loc[df_annotated["ent_label"] == t, :], 'link_correct', 'baseline_prediction'))
for threshold in (0.6, 0.7, 0.75, 0.8, 0.85):
df_annotated['baseline_prediction'] = (df_annotated[sim_metric_col] >= threshold) & (df_annotated["_type_match"])
print(f"--- {threshold} ---")
print(calc_metrics(df_annotated, 'link_correct', 'baseline_prediction'))
for t in ["PERSON", "OBJECT", "ORGANISATION"]:
print(t)
print(calc_metrics(df_annotated.loc[df_annotated["ent_label"] == t, :], 'link_correct', 'baseline_prediction'))
###Output
--- 0.6 ---
{'f1': 0.5584415584415585, 'precision': 0.4174757281553398, 'recall': 0.8431372549019608, 'accuracy': 0.8800705467372134}
PERSON
{'f1': 0.5714285714285714, 'precision': 0.45454545454545453, 'recall': 0.7692307692307693, 'accuracy': 0.8507462686567164}
OBJECT
{'f1': 0.5882352941176471, 'precision': 0.45454545454545453, 'recall': 0.8333333333333334, 'accuracy': 0.7666666666666667}
ORGANISATION
{'f1': 0.537313432835821, 'precision': 0.375, 'recall': 0.9473684210526315, 'accuracy': 0.9077380952380952}
--- 0.7 ---
{'f1': 0.7207207207207207, 'precision': 0.6666666666666666, 'recall': 0.7843137254901961, 'accuracy': 0.9453262786596119}
PERSON
{'f1': 0.6545454545454545, 'precision': 0.6206896551724138, 'recall': 0.6923076923076923, 'accuracy': 0.9054726368159204}
OBJECT
{'f1': 0.8333333333333334, 'precision': 0.8333333333333334, 'recall': 0.8333333333333334, 'accuracy': 0.9333333333333333}
ORGANISATION
{'f1': 0.7727272727272727, 'precision': 0.68, 'recall': 0.8947368421052632, 'accuracy': 0.9702380952380952}
--- 0.75 ---
{'f1': 0.7835051546391751, 'precision': 0.8260869565217391, 'recall': 0.7450980392156863, 'accuracy': 0.9629629629629629}
PERSON
{'f1': 0.7555555555555555, 'precision': 0.8947368421052632, 'recall': 0.6538461538461539, 'accuracy': 0.945273631840796}
OBJECT
{'f1': 0.8333333333333334, 'precision': 0.8333333333333334, 'recall': 0.8333333333333334, 'accuracy': 0.9333333333333333}
ORGANISATION
{'f1': 0.8, 'precision': 0.7619047619047619, 'recall': 0.8421052631578947, 'accuracy': 0.9761904761904762}
--- 0.8 ---
{'f1': 0.7640449438202247, 'precision': 0.8947368421052632, 'recall': 0.6666666666666666, 'accuracy': 0.9629629629629629}
PERSON
{'f1': 0.7555555555555555, 'precision': 0.8947368421052632, 'recall': 0.6538461538461539, 'accuracy': 0.945273631840796}
OBJECT
{'f1': 0.8333333333333334, 'precision': 0.8333333333333334, 'recall': 0.8333333333333334, 'accuracy': 0.9333333333333333}
ORGANISATION
{'f1': 0.7499999999999999, 'precision': 0.9230769230769231, 'recall': 0.631578947368421, 'accuracy': 0.9761904761904762}
--- 0.85 ---
{'f1': 0.738095238095238, 'precision': 0.9393939393939394, 'recall': 0.6078431372549019, 'accuracy': 0.9611992945326279}
PERSON
{'f1': 0.7142857142857143, 'precision': 0.9375, 'recall': 0.5769230769230769, 'accuracy': 0.9402985074626866}
OBJECT
{'f1': 0.9090909090909091, 'precision': 1.0, 'recall': 0.8333333333333334, 'accuracy': 0.9666666666666667}
ORGANISATION
{'f1': 0.7096774193548387, 'precision': 0.9166666666666666, 'recall': 0.5789473684210527, 'accuracy': 0.9732142857142857}
###Markdown
3. Building a machine learning predictor Using only the mention, title, and types of each.
###Code
from sklearn.preprocessing import OneHotEncoder
from typing import List
class FeatureGenerator:
def __init__(self, data: pd.DataFrame, ent_mention_col: str, ent_type_col: str, ent_context_col: str, candidate_title_col: str, candidate_type_col: str, candidate_context_col: str):
self.data = data
# TODO: do lowercase transformation here to make all methods case-insensitive
self.ent_mention_col = self.data[ent_mention_col]
self.ent_type_col = self.data[ent_type_col]
self.ent_context_col = self.data[ent_context_col]
self.candidate_title_col = self.data[candidate_title_col]
self.candidate_type_col = self.data[candidate_type_col]
self.candidate_context_col = self.data[candidate_context_col]
self.n_records = self.data.shape[0]
self.suffix_list = ORG_LEGAL_SUFFIXES
self.ent_type_encoder = OneHotEncoder().fit(self.ent_type_col.unique().reshape(-1, 1))
self.candidate_type_encoder = OneHotEncoder().fit(self.candidate_type_col.unique().reshape(-1, 1))
@staticmethod
def _remove_suffixes(text: str, suffix_list: List[str]) -> str:
"""
Returns lowercased version of text with any of the suffixes in suffix_list removed. Case-insensitive.
"""
mod_text = text[:-1].lower() if text[-1] == "." else text.lower()
for suffix in suffix_list:
if mod_text.endswith(suffix.lower()):
                # drop the matched suffix itself (str.rstrip strips characters, not a suffix)
                mod_text = mod_text[: -len(suffix)].strip()
break
return mod_text
def _apply_string_sim_method(self, method, col_a: pd.Series, col_b: pd.Series, token_wise: bool, denominator: int = 1) -> np.ndarray:
"""
Params:
- token_wise (bool): if True, split each string by spaces (`method` is passed two sequences rather than two strings)
"""
if token_wise:
return np.array([[method(col_a.iloc[idx].split(), col_b.iloc[idx].split()) / denominator] if all([pd.notnull(col_a.iloc[idx]), pd.notnull(col_b.iloc[idx])]) else [0] for idx in range(self.n_records)])
else:
return np.array([[method(col_a.iloc[idx], col_b.iloc[idx]) / denominator] if all([pd.notnull(col_a.iloc[idx]), pd.notnull(col_b.iloc[idx])]) else [0] for idx in range(self.n_records)])
def _generate_similarity_fuzz_sort(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(fuzz.token_sort_ratio, col_a, col_b, denominator=100, token_wise=False)
def _generate_similarity_levenshtein(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.levenshtein.normalized_similarity, col_a, col_b, token_wise=False)
def _generate_similarity_jarowinkler(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.jaro_winkler.normalized_similarity, col_a, col_b, token_wise=False)
def _generate_similarity_jaccard(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.jaccard.normalized_similarity, col_a, col_b, token_wise=True)
def _generate_similarity_sorensen_dice(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.sorensen_dice.normalized_similarity, col_a, col_b, token_wise=True)
def _generate_ml_similarity_fuzz_sort_ignore_suffixes(self, **kwargs) -> np.ndarray:
if "string_sim_metric" in kwargs:
return np.array([[kwargs["string_sim_metric"](self.ent_mention_col.iloc[idx], self.candidate_title_col.iloc[idx]) / 100] for idx in range(self.n_records) ])
else:
return np.array([[fuzz.token_sort_ratio(
self._remove_suffixes(self.ent_mention_col.iloc[idx], self.suffix_list),
self._remove_suffixes(self.candidate_title_col.iloc[idx], self.suffix_list)
) / 100] for idx in range(self.n_records) ])
def _generate_label_in_mention(self, **kwargs) -> np.ndarray:
return np.array(
[[float(self.candidate_title_col.iloc[idx].lower() in self.ent_mention_col.iloc[idx].lower())] for idx in range(self.n_records)]
)
def _generate_mention_in_label(self, **kwargs) -> np.ndarray:
return np.array(
[[float(self.ent_mention_col.iloc[idx].lower() in self.candidate_title_col.iloc[idx].lower())] for idx in range(self.n_records)]
)
def _generate_type_features(self, **kwargs) -> np.ndarray:
return np.concatenate(
(
self.ent_type_encoder.transform(self.ent_type_col.values.reshape(-1,1)).toarray(),
self.candidate_type_encoder.transform(self.candidate_type_col.values.reshape(-1,1)).toarray()
),
axis=1)
def get_feature_matrix(self) -> np.ndarray:
feature_rows = np.concatenate(
(
self._generate_similarity_fuzz_sort(self.ent_mention_col, self.candidate_title_col),
self._generate_similarity_levenshtein(self.ent_mention_col, self.candidate_title_col),
self._generate_similarity_jarowinkler(self.ent_mention_col, self.candidate_title_col),
self._generate_ml_similarity_fuzz_sort_ignore_suffixes(),
self._generate_similarity_jarowinkler(self.ent_context_col, self.candidate_context_col),
self._generate_similarity_jaccard(self.ent_context_col, self.candidate_context_col),
self._generate_similarity_sorensen_dice(self.ent_context_col, self.candidate_context_col),
self._generate_label_in_mention(),
self._generate_mention_in_label(),
self._generate_type_features(),
),
axis=1
)
return feature_rows
f = FeatureGenerator(df_annotated,
ent_mention_col='ent_text', ent_type_col='ent_label', ent_context_col='item_description',
candidate_title_col='candidate_title', candidate_type_col='candidate_type', candidate_context_col='candidate_description')
X = f.get_feature_matrix()
y = 1*(df_annotated['link_correct'].values)
i = 0
n = 10
for idx, row in df_annotated.head(n).iterrows():
print(row['ent_text'], "---" ,row['candidate_title'], X[i, [5]])
i += 1
from sklearn.model_selection import cross_validate
from sklearn.svm import SVC
clf = SVC(kernel='linear', C=1, random_state=42)
scores = cross_validate(clf, X, list(y), cv=10, scoring=['precision_macro', 'recall_macro'])
(scores['test_precision_macro'].mean(), scores['test_precision_macro'].std()), (scores['test_recall_macro'].mean(), scores['test_recall_macro'].std())
from sklearn.linear_model import LogisticRegressionCV
log_r = LogisticRegressionCV(cv=5, random_state=0, max_iter=500).fit(X, list(y))
scores = cross_validate(log_r, X, list(y), cv=10, scoring=['precision_macro', 'recall_macro'])
(scores['test_precision_macro'].mean(), scores['test_precision_macro'].std()), (scores['test_recall_macro'].mean(), scores['test_recall_macro'].std())
from sklearn.neural_network import MLPClassifier
mlp = MLPClassifier(random_state=0, max_iter=1000)
scores = cross_validate(mlp, X, list(y), cv=10, scoring=['precision_macro', 'recall_macro'])
(scores['test_precision_macro'].mean(), scores['test_precision_macro'].std()), (scores['test_recall_macro'].mean(), scores['test_recall_macro'].std())
###Output
_____no_output_____
###Markdown
3.1 Test on unannotated data
###Code
df_unannotated = df[df['link_correct'].isnull()]
len(df_unannotated)
f_un = FeatureGenerator(df_unannotated,
ent_mention_col='ent_text', ent_type_col='ent_label', ent_context_col='item_description',
candidate_title_col='candidate_title', candidate_type_col='candidate_type', candidate_context_col='candidate_description')
X_un = f_un.get_feature_matrix()
X_un.shape
classifier = mlp.fit(X, list(y))
df_unannotated['log_r_prediction'] = classifier.predict(X_un)
df_unannotated['log_r_prediction_proba'] = classifier.predict_proba(X_un)[:,1]
df_unannotated[df_unannotated['log_r_prediction'] == 1].head(50)
# df_unannotated.head(20)
df_unannotated[df_unannotated['log_r_prediction'] == 1].to_csv("pos_preds.csv")
###Output
_____no_output_____
###Markdown
4. Building a pairwise ranking classifier 4.1 RankSVM
###Code
from sklearn.preprocessing import OneHotEncoder
from typing import List
class FeatureGenerator:
def __init__(self, data: pd.DataFrame, source_uri_col: str, ent_mention_col: str, ent_type_col: str, ent_context_col: str, candidate_title_col: str, candidate_type_col: str, candidate_context_col: str):
self.data = data
# TODO: do lowercase transformation here to make all methods case-insensitive
self.data = data
self.source_uri_col = self.data[source_uri_col]
self.ent_mention_col = self.data[ent_mention_col]
self.ent_type_col = self.data[ent_type_col]
self.ent_context_col = self.data[ent_context_col]
self.candidate_title_col = self.data[candidate_title_col]
self.candidate_type_col = self.data[candidate_type_col]
self.candidate_context_col = self.data[candidate_context_col]
self.n_records = self.data.shape[0]
self.suffix_list = ORG_LEGAL_SUFFIXES
self.ent_type_encoder = OneHotEncoder().fit(self.ent_type_col.unique().reshape(-1, 1))
self.candidate_type_encoder = OneHotEncoder().fit(self.candidate_type_col.unique().reshape(-1, 1))
@staticmethod
def _remove_suffixes(text: str, suffix_list: List[str]) -> str:
"""
Returns lowercased version of text with any of the suffixes in suffix_list removed. Case-insensitive.
"""
mod_text = text[:-1].lower() if text[-1] == "." else text.lower()
for suffix in suffix_list:
if mod_text.endswith(suffix.lower()):
                # drop the matched suffix itself (str.rstrip strips characters, not a suffix)
                mod_text = mod_text[: -len(suffix)].strip()
break
return mod_text
def _apply_string_sim_method(self, method, col_a: pd.Series, col_b: pd.Series, token_wise: bool, denominator: int = 1) -> np.ndarray:
"""
Params:
- token_wise (bool): if True, split each string by spaces (`method` is passed two sequences rather than two strings)
"""
if token_wise:
return np.array([[method(col_a.iloc[idx].split(), col_b.iloc[idx].split()) / denominator] if all([pd.notnull(col_a.iloc[idx]), pd.notnull(col_b.iloc[idx])]) else [0] for idx in range(self.n_records)])
else:
return np.array([[method(col_a.iloc[idx], col_b.iloc[idx]) / denominator] if all([pd.notnull(col_a.iloc[idx]), pd.notnull(col_b.iloc[idx])]) else [0] for idx in range(self.n_records)])
def _generate_similarity_fuzz_sort(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(fuzz.token_sort_ratio, col_a, col_b, denominator=100, token_wise=False)
def _generate_similarity_levenshtein(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.levenshtein.normalized_similarity, col_a, col_b, token_wise=False)
def _generate_similarity_jarowinkler(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.jaro_winkler.normalized_similarity, col_a, col_b, token_wise=False)
def _generate_similarity_jaccard(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.jaccard.normalized_similarity, col_a, col_b, token_wise=True)
def _generate_similarity_sorensen_dice(self, col_a: pd.Series, col_b: pd.Series, **kwargs) -> np.ndarray:
return self._apply_string_sim_method(textdistance.sorensen_dice.normalized_similarity, col_a, col_b, token_wise=True)
def _generate_ml_similarity_fuzz_sort_ignore_suffixes(self, **kwargs) -> np.ndarray:
if "string_sim_metric" in kwargs:
return np.array([[kwargs["string_sim_metric"](self.ent_mention_col.iloc[idx], self.candidate_title_col.iloc[idx]) / 100] for idx in range(self.n_records) ])
else:
return np.array([[fuzz.token_sort_ratio(
self._remove_suffixes(self.ent_mention_col.iloc[idx], self.suffix_list),
self._remove_suffixes(self.candidate_title_col.iloc[idx], self.suffix_list)
) / 100] for idx in range(self.n_records) ])
def _generate_label_in_mention(self, **kwargs) -> np.ndarray:
return np.array(
[[float(self.candidate_title_col.iloc[idx].lower() in self.ent_mention_col.iloc[idx].lower())] for idx in range(self.n_records)]
)
def _generate_mention_in_label(self, **kwargs) -> np.ndarray:
return np.array(
[[float(self.ent_mention_col.iloc[idx].lower() in self.candidate_title_col.iloc[idx].lower())] for idx in range(self.n_records)]
)
def _generate_type_features(self, **kwargs) -> np.ndarray:
return np.concatenate(
(
self.ent_type_encoder.transform(self.ent_type_col.values.reshape(-1,1)).toarray(),
self.candidate_type_encoder.transform(self.candidate_type_col.values.reshape(-1,1)).toarray()
),
axis=1)
def get_feature_matrix(self) -> np.ndarray:
feature_rows = np.concatenate(
(
self._generate_similarity_fuzz_sort(self.ent_mention_col, self.candidate_title_col),
self._generate_similarity_levenshtein(self.ent_mention_col, self.candidate_title_col),
self._generate_similarity_jarowinkler(self.ent_mention_col, self.candidate_title_col),
self._generate_ml_similarity_fuzz_sort_ignore_suffixes(),
self._generate_similarity_jarowinkler(self.ent_context_col, self.candidate_context_col),
self._generate_similarity_jaccard(self.ent_context_col, self.candidate_context_col),
self._generate_similarity_sorensen_dice(self.ent_context_col, self.candidate_context_col),
self._generate_label_in_mention(),
self._generate_mention_in_label(),
self._generate_type_features(),
),
axis=1
)
return feature_rows
@staticmethod
def group_data_by_source(data: pd.DataFrame) -> List[pd.DataFrame]:
# TODO: make column names in __init__ strings rather than series and pass them to groupby here
return [_slice for (_, _slice) in data.groupby(["item_uri", "ent_text", "ent_label"])]
def get_data_with_features(self) -> pd.DataFrame:
"""Adds X to the end of the dataframe in columns with names 'feat_i' and returns it"""
X = self.get_feature_matrix()
feat_cols = [f"feat{i}" for i in range(X.shape[1])]
return pd.concat([self.data.reset_index(drop=True), pd.DataFrame(X, columns=feat_cols)], axis=1)
def get_pairwise_features_and_targets(self) -> np.ndarray:
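        """
        Build pairwise (RankSVM-style) training data: for each (item, mention) group,
        emit feature-difference vectors between correct and incorrect candidates with
        targets +1 / -1, reducing the ranking problem to binary classification.
        """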
# TODO: put this in __init__
target_col = 'link_correct'
##
X_pairwise = []
y_pairwise = []
X = self.get_feature_matrix()
feat_cols = [f"feat{i}" for i in range(X.shape[1])]
# data_with_X = pd.concat([self.data.reset_index(), pd.DataFrame(X, columns=feat_cols)], axis=1)
data_with_X = self.get_data_with_features()
for group_data in self.group_data_by_source(data_with_X):
pos_idxs = group_data[group_data[target_col] == True].index
neg_idxs = group_data[group_data[target_col] == False].index
X_g = []
y_g = []
for pos_idx in pos_idxs:
for neg_idx in neg_idxs:
X_g.append(group_data.loc[pos_idx, feat_cols].values - group_data.loc[neg_idx, feat_cols].values)
y_g.append(1)
X_g.append(group_data.loc[neg_idx, feat_cols].values - group_data.loc[pos_idx, feat_cols].values)
y_g.append(-1)
X_pairwise += X_g
y_pairwise += y_g
return np.array(X_pairwise), np.array(y_pairwise)
f = FeatureGenerator(df_annotated,
source_uri_col = 'item_uri',
ent_mention_col='ent_text', ent_type_col='ent_label', ent_context_col='item_description',
candidate_title_col='candidate_title', candidate_type_col='candidate_type', candidate_context_col='candidate_description')
X_p, y_p = f.get_pairwise_features_and_targets()
X_p.shape, y_p.shape
from sklearn import svm
from sklearn.preprocessing import normalize  # used in RankSVM.explain
from scipy import linalg
class RankSVM():
def fit(self, X_train: np.ndarray, y_train: np.ndarray, verbose=False):
clf = svm.LinearSVC(dual=False, verbose=verbose)
clf.fit(X_train, y_train)
self.model = clf
self._weights = self.model.coef_.ravel() / linalg.norm(self.model.coef_)
return self
def rank(self, group: pd.DataFrame) -> List[int]:
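        """Score each candidate row in `group` by projecting its feature columns onto the
        learned (normalised) weight vector, and return the group sorted by descending score."""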
feat_cols = [col for col in group.columns if col.startswith('feat')]
X = group[feat_cols].values
scores = X.dot(self._weights)
scored_group = group.assign(rerank_scores=scores)
scored_group.sort_values("rerank_scores", inplace=True, ascending=False)
return scored_group
# result = scored_group["candidate_id"].astype(np.int).values
# return list(result)
def explain(self, features: List[str]) -> List:
# Gene Selection for Cancer Classification using Support Vector Machines
results = []
scores = self._weights * self._weights
scores = normalize(scores.reshape(1, -1), norm="l1").squeeze()
for w, l in zip(scores, features):
            results.append({"feature": l, "weight": w})
        return results
pair_clf = RankSVM().fit(X_p, y_p)
f_un = FeatureGenerator(df_unannotated,
source_uri_col = 'item_uri',
ent_mention_col='ent_text', ent_type_col='ent_label', ent_context_col='item_description',
candidate_title_col='candidate_title', candidate_type_col='candidate_type', candidate_context_col='candidate_description')
df_un_with_X = f_un.get_data_with_features()
un_groups = [_slice for (_, _slice) in df_un_with_X.groupby(["item_uri", "ent_text", "ent_label"])]
pair_clf.rank(un_groups[17])
###Output
_____no_output_____ |
PorscheScraper.ipynb | ###Markdown
###Code
import requests
import pandas as pd
from bs4 import BeautifulSoup as bs
import datetime
import pickle
import re
import dateutil
import time
base_url = 'https://bringatrailer.com'
auction_results_url = 'auctions/results'
# TODO: You may want to use a user-agent that is not `python-requests-` to not
# trigger anti-bot measures
request = requests.get('/'.join([base_url, auction_results_url])).text
soup = bs(request)
for auction_result in soup.find('div', id='initial-results'):
a = auction_result.find('a')
title = a.text
link = a['href']
print(title, link)
no_reserve_re = re.compile('[Nn]o [Rr]eserve')
year_re = re.compile('[\d]{4}')
make_model_trim_re = re.compile('\d{4}.*')
tmu_re = re.compile('TMU')
lot_re = re.compile('Lot')
mileage_re = re.compile('[\d]*k?\s[Mm]iles')
location_re = re.compile('Location:')
def re_search(regex, text):
search = regex.search(text)
return search.group(0) if search is not None else None
def get_year(text):
return re_search(year_re, text)
def get_make_model_trim(text):
result = re_search(make_model_trim_re, text)
return result.split(' ')[1:] if result else None
def is_no_reserve(text):
return bool(re_search(no_reserve_re, text))
def to_date(date_str):
date_str = re.match('[\w\s,]+', date_str)[0]
return dateutil.parser.parse(date_str)
def to_int(number_str):
number_str = re.match('[\d,$]+', number_str)[0]
number_str = number_str.replace('$', '').replace(',', '')
return int(number_str)
def tmu(text):
return bool(re_search(tmu_re, text))
def mileage(text):
    result = re_search(mileage_re, text)
    return to_int(result.split(' ')[0].replace('k', '000')) if result else None
def location(tag):
    postal_code = country = None
    # parse e.g. "Location: City, State 12345" or "Location: City, State Country"
    _, city, state, *args = tag.find(text=location_re).replace(',', '').replace(':', '').split()
    if args[0].isdigit():
        postal_code = to_int(args[0])
        country = 'USA'
    else:
        country = args[0]
return {
'city': city,
'state': state,
'country': country,
'postal_code': postal_code,
}
def lot_num(tag):
    return to_int(tag.find(text=lot_re).split('#')[-1])
listings = []
auction_items = soup.find('div', id='initial-results').find_all('div', class_='auctions-item-extended')
for item in auction_items:
auction_item = {}
title = item.find('span').text.strip() # Title
status = item.find('div', class_='auctions-item-status').text
auction_item['link'] = item.find('a')['href'] # Link
auction_item['title'] = title
auction_item['no_reserve'] = is_no_reserve(title)
auction_item['model_year'] = get_year(title)
auction_item['make_model_trim'] = get_make_model_trim(title)
auction_item['auction_date'] = to_date(status.split()[-1])
auction_item['price'] = to_int(status.split()[2])
auction_item['sold'] = status.split()[0].lower() == 'sold'
listings.append(auction_item)
from pprint import PrettyPrinter
pp = PrettyPrinter(indent=4)
pp.pprint(listings[:4])
len(listings)
for listing in listings:
listing_request = requests.get(listing['link']).text
listing_soup = bs(listing_request)
listing_essentials = listing_soup.find('div', class_='listing-essentials')
listing['lot_number'] = lot_num(listing_essentials)
listing['tmu'] = tmu(listing_essentials.text)
listing['mileage'] = mileage(listing_essentials.text)
listing['location'] = location(listing_essentials)
pp.pprint(listing)
time.sleep(1)
listings = { 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2000-bmw-m-roadster-58/',
'location': { 'city': 'Beaverton',
'country': 'USA',
'postal_code': 97007,
'state': 'Oregon'},
'lot_number': 30983,
'make_model_trim': ['BMW', 'M', 'Roadster'],
'mileage': 50000,
'model_year': '2000',
'no_reserve': True,
'price': 16250,
'sold': True,
'title': 'No Reserve: 2000 BMW M Roadster',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1991-jaguar-xjs-23/',
'location': { 'city': 'Raleigh',
'country': None,
'postal_code': None,
'state': 'North'},
'lot_number': 30980,
'make_model_trim': ['Jaguar', 'XJS', 'Convertible'],
'mileage': 50000,
'model_year': '1991',
'no_reserve': False,
'price': 16500,
'sold': True,
'title': '19k-Mile 1991 Jaguar XJS Convertible',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1999-porsche-911-carrera-119/',
'location': { 'city': 'Denver',
'country': 'USA',
'postal_code': 80216,
'state': 'Colorado'},
'lot_number': 30979,
'make_model_trim': ['Porsche', '911', 'Carrera', 'Cabriolet', '6-Speed'],
'mileage': 50000,
'model_year': '1999',
'no_reserve': False,
'price': 16100,
'sold': True,
'title': '1999 Porsche 911 Carrera Cabriolet 6-Speed',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1970-honda-trail-90-2/',
'location': { 'city': 'Portland',
'country': 'USA',
'postal_code': 97218,
'state': 'Oregon'},
'lot_number': 30985,
'make_model_trim': ['Honda', 'Trail', '90'],
'mileage': 50000,
'model_year': '1970',
'no_reserve': True,
'price': 2800,
'sold': True,
'title': 'No Reserve: 1970 Honda Trail 90',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1969-chevrolet-corvette-56/',
'location': { 'city': 'Monterey',
'country': 'USA',
'postal_code': 93940,
'state': 'California'},
'lot_number': 30986,
'make_model_trim': [ 'Chevrolet',
'Corvette',
'Convertible',
'350',
'5-Speed'],
'mileage': 50000,
'model_year': '1969',
'no_reserve': False,
'price': 22701,
'sold': True,
'title': '1969 Chevrolet Corvette Convertible 350 5-Speed',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2006-mercedes-benz-sl600-6/',
'location': { 'city': 'Naples',
'country': 'USA',
'postal_code': 34119,
'state': 'Florida'},
'lot_number': 30976,
'make_model_trim': ['Mercedes-Benz', 'SL600'],
'mileage': 50000,
'model_year': '2006',
'no_reserve': False,
'price': 23750,
'sold': True,
'title': '39k-Mile 2006 Mercedes-Benz SL600',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2006-porsche-911-carrera-4-6/',
'location': { 'city': 'Miami',
'country': 'USA',
'postal_code': 33186,
'state': 'Florida'},
'lot_number': 30984,
'make_model_trim': ['Porsche', '911', 'Carrera', '4'],
'mileage': 50000,
'model_year': '2006',
'no_reserve': True,
'price': 26750,
'sold': True,
'title': 'No Reserve: 2006 Porsche 911 Carrera 4',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2000-jaguar-xkr-11/',
'location': { 'city': 'Ball',
'country': None,
'postal_code': None,
'state': 'Ground'},
'lot_number': 30967,
'make_model_trim': ['Jaguar', 'XKR', 'Convertible'],
'mileage': 50000,
'model_year': '2000',
'no_reserve': False,
'price': 31750,
'sold': True,
'title': 'Modified 2000 Jaguar XKR Convertible',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1969-honda-mini-trail-50-8/',
'location': { 'city': 'Allendale',
'country': 'USA',
'postal_code': 49401,
'state': 'Michigan'},
'lot_number': 30982,
'make_model_trim': ['Honda', 'Z50A', 'Monkey'],
'mileage': None,
'model_year': '1969',
'no_reserve': True,
'price': 4800,
'sold': True,
'title': 'No Reserve: 1969 Honda Z50A Monkey',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2004-porsche-911-turbo-cabriolet-41/',
'location': { 'city': 'Upland',
'country': 'USA',
'postal_code': 91784,
'state': 'California'},
'lot_number': 30981,
'make_model_trim': ['Porsche', '911', 'Turbo', 'Cabriolet'],
'mileage': None,
'model_year': '2004',
'no_reserve': False,
'price': 47250,
'sold': True,
'title': '29k-Mile 2004 Porsche 911 Turbo Cabriolet',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1972-ford-f-100-11/',
'location': { 'city': 'Los',
'country': None,
'postal_code': None,
'state': 'Angeles'},
'lot_number': 30978,
'make_model_trim': ['Ford', 'F-100'],
'mileage': 50000,
'model_year': '1972',
'no_reserve': False,
'price': 9999,
'sold': False,
'title': '1972 Ford F-100',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1985-audi-ur-quattro-12/',
'location': { 'city': 'Mount',
'country': None,
'postal_code': None,
'state': 'Kisco'},
'lot_number': 30977,
'make_model_trim': ['Audi', 'Ur-Quattro'],
'mileage': 50000,
'model_year': '1985',
'no_reserve': False,
'price': 49100,
'sold': True,
'title': '1985 Audi Ur-Quattro',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1961-austin-healey-bug-eye-sprite-14/',
'location': { 'city': 'Johns',
'country': None,
'postal_code': None,
'state': 'Island'},
'lot_number': 30973,
'make_model_trim': ['Austin-Healey', 'Bugeye', 'Sprite'],
'mileage': 50000,
'model_year': '1961',
'no_reserve': True,
'price': 18750,
'sold': True,
'title': 'No Reserve: 1961 Austin-Healey Bugeye Sprite',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2014-audi-rs-5-13/',
'location': { 'city': 'Sanford',
'country': 'USA',
'postal_code': 32771,
'state': 'Florida'},
'lot_number': 30972,
'make_model_trim': ['Audi', 'RS5'],
'mileage': 50000,
'model_year': '2014',
'no_reserve': False,
'price': 40500,
'sold': True,
'title': '11k-Mile 2014 Audi RS5',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2002-honda-s2000-63/',
'location': { 'city': 'Tallahassee',
'country': 'USA',
'postal_code': 32317,
'state': 'Florida'},
'lot_number': 30974,
'make_model_trim': ['Honda', 'S2000'],
'mileage': 50000,
'model_year': '2002',
'no_reserve': False,
'price': 19000,
'sold': True,
'title': '2002 Honda S2000',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1980-puch-maxi-s-2-speed/',
'location': { 'city': 'Pleasanton',
'country': 'USA',
'postal_code': 94566,
'state': 'California'},
'lot_number': 30971,
'make_model_trim': ['Puch', 'Maxi', 'Sport', 'Mk', 'II'],
'mileage': 50000,
'model_year': '1980',
'no_reserve': True,
'price': 5800,
'sold': True,
'title': 'No Reserve: 1980 Puch Maxi Sport Mk II',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1923-ford-t-bucket-11-2/',
'location': { 'city': 'Erie',
'country': 'USA',
'postal_code': 16509,
'state': 'Pennsylvania'},
'lot_number': 30970,
'make_model_trim': ['Ford', 'T-Bucket', 'Hot', 'Rod'],
'mileage': 50000,
'model_year': '1923',
'no_reserve': False,
'price': 22500,
'sold': True,
'title': '1923 Ford T-Bucket Hot Rod',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2010-porsche-911-gt3-rs-8/',
'location': { 'city': 'Mill',
'country': None,
'postal_code': None,
'state': 'Valley'},
'lot_number': 30962,
'make_model_trim': ['Porsche', '911', 'GT3', 'RS'],
'mileage': 50000,
'model_year': '2010',
'no_reserve': False,
'price': 126500,
'sold': True,
'title': '2010 Porsche 911 GT3 RS',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/scuderia-ferrari-annual-yearbook/',
'location': { 'city': 'San',
'country': None,
'postal_code': None,
'state': 'Rafael'},
'lot_number': 30961,
'make_model_trim': ['Scuderia', 'Ferrari', 'Yearbook'],
'mileage': None,
'model_year': '1932',
'no_reserve': False,
'price': 5200,
'sold': True,
'title': '1932 Scuderia Ferrari Yearbook',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1977-land-rover-88-series-iii/',
'location': { 'city': 'Boston',
'country': 'USA',
'postal_code': 2116,
'state': 'Massachusetts'},
'lot_number': 30964,
'make_model_trim': ['Land', 'Rover', '88', 'Series', 'III'],
'mileage': 50000,
'model_year': '1977',
'no_reserve': True,
'price': 12000,
'sold': True,
'title': 'No Reserve: 1977 Land Rover 88 Series III',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2009-porsche-cayenne-turbo-s-11/',
'location': { 'city': 'Fort',
'country': None,
'postal_code': None,
'state': 'Lauderdale'},
'lot_number': 30956,
'make_model_trim': ['Porsche', 'Cayenne', 'Turbo', 'S'],
'mileage': 50000,
'model_year': '2009',
'no_reserve': False,
'price': 29000,
'sold': True,
'title': '43k-Mile 2009 Porsche Cayenne Turbo S',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2007-bentley-continental-gt-6/',
'location': { 'city': 'Akron',
'country': 'USA',
'postal_code': 44313,
'state': 'Ohio'},
'lot_number': 30965,
'make_model_trim': ['Bentley', 'Continental', 'GT'],
'mileage': 50000,
'model_year': '2007',
'no_reserve': False,
'price': 41200,
'sold': False,
'title': '2007 Bentley Continental GT',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1995-mitsubishi-pajero-mini/',
'location': { 'city': 'Cookeville',
'country': 'USA',
'postal_code': 38501,
'state': 'Tennessee'},
'lot_number': 30953,
'make_model_trim': ['Mitsubishi', 'Pajero', 'Mini', 'XR-II'],
'mileage': 50000,
'model_year': '1995',
'no_reserve': True,
'price': 14250,
'sold': True,
'title': 'No Reserve: 11k-Mile 1995 Mitsubishi Pajero Mini XR-II',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2008-bmw-535-40/',
'location': { 'city': 'Milton',
'country': 'USA',
'postal_code': 2186,
'state': 'Massachusetts'},
'lot_number': 30959,
'make_model_trim': ['BMW', '535xi', 'Sports', 'Wagon'],
'mileage': 50000,
'model_year': '2008',
'no_reserve': True,
'price': 12850,
'sold': True,
'title': 'No Reserve: 2008 BMW 535xi Sports Wagon',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1966-bug-rear-engine-sprint/',
'location': { 'city': 'Vancouver',
'country': 'USA',
'postal_code': 98685,
'state': 'Washington'},
'lot_number': 30955,
'make_model_trim': ['Bug', 'Sprint', 'Kart'],
'mileage': None,
'model_year': '1966',
'no_reserve': True,
'price': 7000,
'sold': True,
'title': 'No Reserve: 1966 Bug Sprint Kart',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1998-land-rover-range-rover-2/',
'location': { 'city': 'Northbrook',
'country': 'USA',
'postal_code': 60062,
'state': 'Illinois'},
'lot_number': 30960,
'make_model_trim': ['Land', 'Rover', 'Range', 'Rover', '4.6', 'HSE'],
'mileage': 50000,
'model_year': '1998',
'no_reserve': False,
'price': 12000,
'sold': True,
'title': '1998 Land Rover Range Rover 4.6 HSE',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2020-chevy-corvette/',
'location': { 'city': 'Delavan',
'country': 'USA',
'postal_code': 53115,
'state': 'Wisconsin'},
'lot_number': 30954,
'make_model_trim': ['Chevrolet', 'Corvette', 'Stingray', 'Coupe'],
'mileage': 50000,
'model_year': '2020',
'no_reserve': False,
'price': 91500,
'sold': True,
'title': '2020 Chevrolet Corvette Stingray Coupe',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1948-lincoln-continental-coupe-2/',
'location': { 'city': 'Kingwood',
'country': 'USA',
'postal_code': 77345,
'state': 'Texas'},
'lot_number': 30947,
'make_model_trim': ['Lincoln', 'Continental', 'Coupe'],
'mileage': 50000,
'model_year': '1948',
'no_reserve': False,
'price': 32000,
'sold': True,
'title': '1948 Lincoln Continental Coupe',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/2012-porsche-cayman-r-16/',
'location': { 'city': 'Campbell',
'country': 'USA',
'postal_code': 95008,
'state': 'California'},
'lot_number': 30950,
'make_model_trim': ['Porsche', 'Cayman', 'R', '6-Speed'],
'mileage': 50000,
'model_year': '2012',
'no_reserve': False,
'price': 46300,
'sold': True,
'title': '2012 Porsche Cayman R 6-Speed',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1983-ford-mustang-gt-5-0-5/',
'location': { 'city': 'Topton',
'country': 'USA',
'postal_code': 19562,
'state': 'Pennsylvania'},
'lot_number': 30948,
'make_model_trim': ['Ford', 'Mustang', 'GT', '5.0', '4-Speed'],
'mileage': 50000,
'model_year': '1983',
'no_reserve': False,
'price': 16000,
'sold': True,
'title': '36-Years-Owned 1983 Ford Mustang GT 5.0 4-Speed',
'tmu': False}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1960-mercedes-benz-190sl-18/',
'location': { 'city': 'Ball',
'country': None,
'postal_code': None,
'state': 'Ground'},
'lot_number': 30949,
'make_model_trim': ['Mercedes-Benz', '190SL'],
'mileage': 50000,
'model_year': '1960',
'no_reserve': False,
'price': 69100,
'sold': True,
'title': '1960 Mercedes-Benz 190SL',
'tmu': True}
{ 'auction_date': datetime.datetime(2020, 5, 5, 0, 0),
'link': 'https://bringatrailer.com/listing/1972-honda-z600-9/',
'location': { 'city': 'Ventura',
'country': 'USA',
'postal_code': 93001,
'state': 'California'},
'lot_number': 30951,
'make_model_trim': ['Honda', 'Z600'],
'mileage': 50000,
'model_year': '1972',
'no_reserve': False,
'price': 11250,
'sold': True,
'title': '1972 Honda Z600',
'tmu': True}
import pickle
with open('listings.pickle', 'wb') as file:
pickle.dump(listings, file)
# Previously Listed
listing_html = requests.get('https://bringatrailer.com/listing/2006-bmw-m3-coupe-41/').text
soup = bs(listing_html)
soup.find('div', class_='post-excerpt').find_all('a', rel='noreferrer')
###Output
_____no_output_____ |
Examples-biasutti/myproject.ipynb | ###Markdown
Test project: plot the lagged autocorrelation of daily rainfall for one point in West Africa
###Code
###Output
_____no_output_____ |
Automated_ML/03b_Forecasting_Pipeline/03b_Forecasting_Pipeline.ipynb | ###Markdown
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. 03b Forecasting Pipeline ---  In this notebook we create a pipeline for forecasting with 11,973 AutoML models. The training and scoring of these models was completed in the Training notebook in this repository. We will set up the pipeline for forecasting given the desired forecasting horizon. We utilize the [ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-contrib-pipeline-steps/azureml.contrib.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) to parallelize the process. For more information about the data and models, refer to the Data Preparation and Training notebooks. The pipeline setup is similar to the Training Pipeline in this repository. For more details on the steps and functions, refer to the Training folder. Prerequisites At this point, you should have already: 1. Created your AML Workspace using the [00_Setup_AML_Workspace notebook](../00_Setup_AML_Workspace.ipynb) 2. Run [01b_Data_Preparation.ipynb](../01b_Data_Preparation/01b_Data_Preparation.ipynb) to create the dataset 3. Run [02b_Train_AutomatedML.ipynb](../02b_Train_AutoML/02b_Train_AutoML.ipynb) to train the models 1.0 Call the Workspace, Datastore, and Compute As we did in the Training Pipeline notebook, we need to call the Workspace. We also want to create variables for the datastore and compute cluster. Connect to the workspace
###Code
import azureml.core
from azureml.core import Workspace, Datastore
import pandas as pd
# set up workspace
ws= Workspace.from_config()
# Take a look at Workspace
ws.get_details()
# set up datastores
dstore = ws.get_default_datastore()
output = {}
output['SDK version'] = azureml.core.VERSION
output['Subscription ID'] = ws.subscription_id
output['Workspace'] = ws.name
output['Resource Group'] = ws.resource_group
output['Location'] = ws.location
output['Default datastore name'] = dstore.name
pd.set_option('display.max_colwidth', -1)
outputDf = pd.DataFrame(data = output, index = [''])
outputDf.T
###Output
_____no_output_____
###Markdown
Attach existing compute resource
###Code
from azureml.core.compute import AmlCompute, ComputeTarget
# Choose a name for your cluster.
amlcompute_cluster_name = "train-many-model"
found = False
# Check if this compute target already exists in the workspace.
cts = ws.compute_targets
if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute':
found = True
print('Found existing compute target.')
compute = cts[amlcompute_cluster_name]
if not found:
print('Creating a new compute target...')
provisioning_config = AmlCompute.provisioning_configuration(vm_size='STANDARD_D13_V2',
min_nodes=3,
max_nodes=20)
# Create the cluster.
compute = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config)
print('Checking cluster status...')
# Can poll for a minimum number of nodes and for a specific timeout.
# If no min_node_count is provided, it will use the scale settings for the cluster.
compute.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20)
# For a more detailed view of current AmlCompute status, use get_status().
###Output
_____no_output_____
###Markdown
Set up an Experiment
###Code
from azureml.core import Experiment
experiment = Experiment(ws, 'manymodels-forecasting-pipeline')
###Output
_____no_output_____
###Markdown
Call the Datastore
###Code
from azureml.core import Datastore
dstore = ws.get_default_datastore()
###Output
_____no_output_____
###Markdown
2.0 Call Registered FileDataset In the Data Preparation notebook, we registered the orange juice inference data to the Workspace. You can choose to run the pipeline on the subset of 10 series or the full dataset of 11,973 series. We recommend starting with the 10 series and then expanding.
###Code
from azureml.core.dataset import Dataset
filedst_10_models = Dataset.get_by_name(ws, name='oj_inference_small')
filedst_10_models_input = filedst_10_models.as_named_input('forecast_10_models')
filedst_all_models = Dataset.get_by_name(ws, name='oj_inference')
filedst_all_models_input = filedst_all_models.as_named_input('forecast_all_models')
###Output
_____no_output_____
###Markdown
3.0 Build forecasting pipeline Now that the data, models, and compute resources are set up, we can put together a pipeline for forecasting. Set up the environment to run the script Specify the conda dependencies for your script. This allows us to install packages and configure the environment. A rough sketch of such an environment definition is shown below; the actual, fully pinned definition comes from the helper called in the next cell.
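Purely as an illustration of the underlying SDK calls — the environment name and package list below are assumptions, not the accelerator's actual settings, which live inside `scripts.helper.get_automl_environment()` — such an environment might be assembled like this:

```python
# Illustrative sketch only: the real dependencies are pinned inside
# scripts.helper.get_automl_environment().
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies

sketch_env = Environment(name="many-models-forecast-env")    # hypothetical name
deps = CondaDependencies.create(
    pip_packages=["azureml-sdk[automl]", "pandas", "joblib"]  # assumed package list
)
sketch_env.python.conda_dependencies = deps
```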
###Code
from scripts.helper import get_automl_environment
forecast_env = get_automl_environment()
###Output
_____no_output_____
###Markdown
Create the configuration to wrap the entry script [ParallelRunConfig](https://docs.microsoft.com/en-us/python/api/azureml-contrib-pipeline-steps/azureml.contrib.pipeline.steps.parallel_run_config.parallelrunconfig) is the configuration for the parallel run step. You will need to determine the number of workers and nodes appropriate for your use case. The process_count_per_node is based on the number of cores of the compute VM. The node_count determines the number of compute nodes to use; increasing the node count will speed up the process. * node_count: The number of compute nodes to be used for running the user script. We recommend starting with 3 and increasing the node_count if the run is taking too long. * process_count_per_node: The number of processes per node. * run_invocation_timeout: The run() method invocation timeout in seconds. The timeout should be set to the maximum time of one AutoML run (with some buffer); by default it is 60 seconds. NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend setting the parallelism to a maximum of 20 runs per experiment per workspace. If users increase this limit to get more parallelism, they might encounter Too Many Requests errors (HTTP 429). A sketch of roughly what the helper in the next cell assembles is shown below.
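The next cell delegates this to `scripts.helper.build_parallel_run_config_for_forecasting`. As a hedged illustration only — the source directory, entry-script name, error threshold, and output action below are assumptions rather than the helper's real values — the returned configuration is built from calls along these lines:

```python
# Minimal sketch, assuming a scoring script named forecast.py in ./scripts;
# the real values live in build_parallel_run_config_for_forecasting().
from azureml.contrib.pipeline.steps import ParallelRunConfig

sketch_config = ParallelRunConfig(
    source_directory='./scripts',      # assumed location of the entry script
    entry_script='forecast.py',        # hypothetical entry-script name
    environment=forecast_env,          # from get_automl_environment() above
    compute_target=compute,
    node_count=3,
    process_count_per_node=6,
    run_invocation_timeout=300,
    error_threshold=-1,                # assumed: do not stop on item failures
    output_action='append_row')
```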
###Code
#!pip install azureml.contrib.pipeline.steps
from scripts.helper import build_parallel_run_config_for_forecasting
# PLEASE MODIFY the following three settings based on your compute and experiment timeout.
node_count=3
process_count_per_node=6
run_invocation_timeout=300 # this timeout(in seconds), for larger models need to change this to a higher timeout
parallel_run_config = build_parallel_run_config_for_forecasting(forecast_env, compute, node_count, process_count_per_node, run_invocation_timeout)
###Output
_____no_output_____
###Markdown
Create the ParallelRunStep The [ParallelRunStep](https://docs.microsoft.com/en-us/python/api/azureml-contrib-pipeline-steps/azureml.contrib.pipeline.steps.parallel_run_step.parallelrunstep?view=azure-ml-py) is the main step in our pipeline. We specify the following parameters: **input**, **output**, and **arguments**. We also set the output directory. For the orange juice sales forecasting, we pass the following **arguments** to the entry_script. - **group_column_names**: list of column names that identifies the groups - **target_column_name**: [Optional] column name, only if the inference dataset contains the target - **time_column_name**: [Optional] column name, only if the data is a timeseries *arguments* and *inputs* are the two parameters that can pass information to the entry_script. You can switch between running the pipeline on the subset of models or the full dataset by changing the inputs parameter.
###Code
from azureml.pipeline.core import PipelineData
from azureml.contrib.pipeline.steps import ParallelRunStep
forecasting_output_name = 'forecasting_output'
output_dir = PipelineData(name = forecasting_output_name,
datastore = dstore)
parallelrun_step = ParallelRunStep(
name="many-models-forecasting",
parallel_run_config=parallel_run_config,
inputs=[filedst_10_models_input],
#inputs=[filedst_all_models_input],
output=output_dir,
models= [],
arguments=['--group_column_names', 'Store', 'Brand',
'--target_column_name', 'Quantity', # this is optional, and needs to be passed only if inference data contains target column
'--time_column_name', 'WeekStarting' # this is needed for timeseries
])
###Output
_____no_output_____
###Markdown
4.0 Run the forecast pipeline We can use the Experiment we created to track the runs of the pipeline and review the output.
###Code
from azureml.pipeline.core import Pipeline
pipeline = Pipeline(workspace = ws, steps=parallelrun_step)
run = experiment.submit(pipeline)
###Output
_____no_output_____
###Markdown
You can run the following command if you'd like to monitor the forecasting process in the Jupyter notebook. It will stream logs live while forecasting. **Note**: this command may not work for a Notebook VM; however, it should work on your local laptop.
###Code
run.wait_for_completion(show_output=True)
###Output
_____no_output_____
###Markdown
Successfully forecasted on the AutoML models. 5.0 Pipeline Outputs The forecasting pipeline forecasts the orange juice quantity for each Store by Brand. The pipeline returns one file with the predictions for each store and outputs the result to the forecasting_output Blob container. The details of the blob container are listed in 'forecasting_output.txt' under Outputs+logs. The following code snippet: 1. Downloads the contents of the output folder that is passed in the parallel run step 2. Reads the parallel_run_step.txt file that has the predictions as a pandas dataframe, and 3. Displays the top 10 rows of the predictions
###Code
import pandas as pd
import shutil
import os
import sys
from scripts.helper import get_forecasting_output
forecasting_results_name = "forecasting_results"
forecast_file = get_forecasting_output(run, forecasting_results_name, forecasting_output_name)
df = pd.read_csv(forecast_file, delimiter=" ", header=None)
df.columns = ["Week Starting", "Store", "Brand", "Quantity", "Advert", "Price" , "Revenue", "Predicted" ]
print("Prediction has ", df.shape[0], " rows. Here the first 10 rows are being displayed.")
df.head(10)
###Output
_____no_output_____
###Markdown
6.0 Publish and schedule the pipeline (Optional) 6.1 Publish the pipeline Once you have a pipeline you're happy with, you can publish it so that you can call it programmatically later on. See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipelinepublish-a-pipeline) for additional information on publishing and calling pipelines.
###Code
# published_pipeline = pipeline.publish(name = 'automl_forecast_many_models',
# description = 'forecast many models',
# version = '1',
# continue_on_step_failure = False)
###Output
_____no_output_____
###Markdown
6.2 Schedule the pipeline You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain or forecast models every month or based on another trigger such as data drift.
###Code
# from azureml.pipeline.core import Schedule, ScheduleRecurrence
# forecasting_pipeline_id = published_pipeline.id
# recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00")
# recurring_schedule = Schedule.create(ws, name="automl_forecasting_recurring_schedule",
# description="Schedule Forecasting Pipeline to run on the first day of every week",
# pipeline_id=forecasting_pipeline_id,
# experiment_name=experiment.name,
# recurrence=recurrence)
###Output
_____no_output_____ |
Steane_Code_FT_encoding_simple.ipynb | ###Markdown
Steane code encoding fault tolerance =============================== 1. Set up two logical zeros for the Steane code based on the parity matrix in the book by Nielsen MA, Chuang IL. Quantum Computation and Quantum Information, 10th Anniversary Edition. Cambridge University Press; 2016. p. 474. 2. Set up fault tolerance as per schemes B, C and D from Goto H. Minimizing resource overheads for fault-tolerant preparation of encoded states of the Steane code. Sci Rep. 2016 Jan 27;6:19578. 3. Compare this with the non-fault-tolerant encoding circuit and a single qubit. 4. Find out whether either scheme shows a tolerance using the simple decoding method. Import the necessary function modules, including the SteaneCodeLogicalQubit class. The methods of this class are called in this notebook.
###Code
from qiskit import(
QuantumCircuit,
QuantumRegister,
ClassicalRegister,
execute,
Aer
)
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.noise.errors import pauli_error, depolarizing_error
from circuits import SteaneCodeLogicalQubit
from helper_functions import (
get_noise,
mean_of_list,
calculate_standard_error,
print_time,
process_FT_results,
calculate_simple_parity_bits
)
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Define constants so the process flow can be controlled from one place:
###Code
SINGLE_GATE_ERRORS = ['x', 'y', 'z', 'h', 's', 'sdg']
TWO_GATE_ERRORS = ['cx', 'cz']
NOISE = True #Test with noise
SHOTS = 100000 #Number of shots to run
SHOTS_SINGLE = 1000000 #Number of shots to run
MEASURE_NOISE = 0.0046 #Measurement noise not relevant
SINGLE_GATE_DEPOLARISING = 0.000366 #Single gate noise
TWO_GATE_DEPOLARISING = 0.022
FACTOR_LIST = [1, 0.1, 0.01, 0.001, 0.0001]
ITERATIONS = 25
SIMULATOR = Aer.get_backend('qasm_simulator')
TITLE = 'Steane code encoding with fault tolerance'
#constants needed for correction and detection with FTc scheme
ANC_ZERO = '0000'
ANC_ONE = '0001'
ANCILLA_TYPES = 2
ANCILLA_QUBITS = 3
ANCILLA_MEASUREMENT_REPEATS = 3
DATA_MEASUREMENT_REPEATS = 3
DATA_MEAS_QUBITS = 1
DATA_MEAS_START = ANCILLA_TYPES * ANCILLA_QUBITS * ANCILLA_MEASUREMENT_REPEATS
DATA_START = DATA_MEAS_START + (DATA_MEAS_QUBITS * DATA_MEASUREMENT_REPEATS)
SIMPLE_DECODING = True
qubit_list = calculate_simple_parity_bits()
print(qubit_list)
###Output
[2, 4, 5]
###Markdown
We specify the parity check matrix, since this defines the Steane code. It is validated before the logical qubit is initialised, to check that it is orthogonal to the valid codewords. A quick stand-alone NumPy check of this orthogonality is sketched below.
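This check can be reproduced by hand in a few lines. A minimal sketch, duplicating the matrix and codewords that the next cell defines (the variable names here are illustrative only):

```python
# Verify that every codeword c satisfies H . c = 0 (mod 2),
# i.e. the codewords lie in the null space of the parity check matrix.
import numpy as np

H = np.array([[int(b) for b in row] for row in ['0001111', '0110011', '1010101']])
words = ['0000000', '1010101', '0110011', '1100110',
         '0001111', '1011010', '0111100', '1101001']
for word in words:
    c = np.array([int(b) for b in word])
    syndrome = H.dot(c) % 2
    assert not syndrome.any(), f'{word} fails the parity check'
print('All codewords are orthogonal to the parity check matrix')
```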
###Code
parity_check_matrix = ['0001111',
'0110011',
'1010101'
]
codewords = ['0000000',
'1010101',
'0110011',
'1100110',
'0001111',
'1011010',
'0111100',
'1101001'
]
def single_qubit():
cd = QuantumRegister(1,'data')
sd = ClassicalRegister(1,'measure_data')
qc = QuantumCircuit(cd, sd)
qc.measure(cd, sd)
if NOISE:
result = execute(qc, SIMULATOR, noise_model = noise_model, shots = SHOTS_SINGLE).result()
else:
result = execute(qc, SIMULATOR, shots = SHOTS_SINGLE).result()
counts = result.get_counts(qc)
return(counts)
###Output
_____no_output_____
###Markdown
Function module for the non-fault-tolerant encoding circuit.
###Code
def encoding():
# no fault tolerance
qubit = SteaneCodeLogicalQubit(1, parity_check_matrix, codewords, ancilla = False)
qubit.set_up_logical_zero(0)
qubit.logical_measure_data(0)
if NOISE:
result = execute(qubit, SIMULATOR, noise_model = noise_model, shots = SHOTS).result()
else:
result = execute(qubit, SIMULATOR, shots = SHOTS).result()
counts = result.get_counts(qubit)
#print(counts)
return(counts)
###Output
_____no_output_____
###Markdown
Function module for the fault-tolerant encoding circuit, scheme B from Goto's paper. See the worksheet "Steane_code_encoding_FTb" for the circuit diagram
###Code
def encoding_FTb():
qubit = SteaneCodeLogicalQubit(2, parity_check_matrix, codewords,
ancilla = False, fault_tolerant_b = True,
data_rounds = 3
)
qubit.set_up_logical_zero(0)
for i in range(3):
qubit.barrier()
qubit.set_up_logical_zero(1)
qubit.barrier()
qubit.logical_gate_CX(0, 1)
qubit.barrier()
qubit.logical_measure_data_FT(logical_qubit = 1, measure_round = i + 1)
qubit.barrier()
qubit.logical_measure_data(0)
if NOISE:
result = execute(qubit, SIMULATOR, noise_model = noise_model, shots = SHOTS).result()
else:
result = execute(qubit, SIMULATOR, shots = SHOTS).result()
counts = result.get_counts(qubit)
return(counts)
###Output
_____no_output_____
###Markdown
Function module for the fault-tolerant encoding circuit, scheme C from Goto's paper. See the worksheet "Steane_code_encoding_FTc" for the circuit diagram
###Code
def encoding_FTc():
qubit = SteaneCodeLogicalQubit(1, parity_check_matrix, codewords, ancilla = False,
fault_tolerant_c = True, data_rounds = 3, )
qubit.set_up_logical_zero(0)
qubit.barrier()
qubit.barrier()
for i in range(3):
qubit.encode_fault_tolerant_method_C(qubit_list)
qubit.barrier()
qubit.logical_measure_data_FT(0, i + 1)
qubit.barrier()
qubit.logical_measure_data(0)
qubit.barrier()
if NOISE:
result = execute(qubit, SIMULATOR, noise_model = noise_model, shots = SHOTS).result()
else:
result = execute(qubit, SIMULATOR, shots=SHOTS).result()
counts = result.get_counts(qubit)
#print(counts)
return(counts)
###Output
_____no_output_____
###Markdown
Function module for the fault-tolerant encoding circuit, scheme D, based on our own design. See the worksheet "Steane_code_encoding_FTd" for the circuit diagram.
###Code
def encoding_FTd():
qubit = SteaneCodeLogicalQubit(2, parity_check_matrix, codewords,
ancilla = False, fault_tolerant_b = True,
data_rounds = 3
)
qubit.set_up_logical_zero(0)
for i in range(3):
qubit.barrier()
qubit.logical_data_reset(1)
qubit.barrier()
qubit.logical_gate_CX(0, 1)
qubit.barrier()
qubit.logical_measure_data_FT(logical_qubit = 1, measure_round = i + 1)
qubit.barrier()
qubit.logical_measure_data(0)
if NOISE:
result = execute(qubit, SIMULATOR, noise_model = noise_model, shots = SHOTS).result()
else:
result = execute(qubit, SIMULATOR, shots = SHOTS).result()
counts = result.get_counts(qubit)
#print(counts)
return(counts)
###Output
_____no_output_____
###Markdown
Function module for the fault-tolerant encoding circuit, scheme C from Goto's paper, with fault-tolerant ancilla. See the worksheet "Steane_code_encoding_FTc" for the circuit diagram
###Code
def encoding_and_detection_FTc():
qubit = SteaneCodeLogicalQubit(1, parity_check_matrix, codewords,
fault_tolerant_ancilla = True, fault_tolerant_c = True,
ancilla_rounds = 3, data_rounds = 3
)
qubit.set_up_logical_zero(0)
for i in range(ANCILLA_MEASUREMENT_REPEATS):
qubit.encode_fault_tolerant_method_C(qubit_list)
qubit.barrier()
qubit.logical_measure_data_FT(0, i + 1)
qubit.barrier()
for i in range(ANCILLA_MEASUREMENT_REPEATS):
qubit.set_up_ancilla(0)
qubit.logical_measure_ancilla(0, i)
qubit.logical_measure_data(0)
if NOISE:
result = execute(qubit, SIMULATOR, noise_model=noise_model, shots=SHOTS).result()
else:
result = execute(qubit, SIMULATOR, shots=SHOTS).result()
counts = result.get_counts(qubit)
return(counts)
#print ('The different states can be counted. The simulated result are', counts)
###Output
_____no_output_____
###Markdown
Function module for the non-fault-tolerant encoding circuit with ancilla
###Code
def encoding_with_ancilla():
# no fault tolerance
qubit = SteaneCodeLogicalQubit(1, parity_check_matrix, codewords)
qubit.set_up_logical_zero(0)
qubit.set_up_ancilla(0)
qubit.logical_measure_data(0)
qubit.logical_measure_ancilla(0)
if NOISE:
result = execute(qubit, SIMULATOR, noise_model = noise_model, shots = SHOTS).result()
else:
result = execute(qubit, SIMULATOR, shots = SHOTS).result()
counts = result.get_counts(qubit)
#print(counts)
return(counts)
###Output
_____no_output_____
###Markdown
Calculation and processing of results
###Code
single = []
nonft = []
ftb = []
ftc = []
ftd = []
ftca = []
nonfta = []
error_single = []
error_nonft = []
error_ftb = []
error_ftc = []
error_ftd = []
error_ftca = []
error_nonfta = []
for factor in FACTOR_LIST:
print()
print_time()
print(f'Processing factor {factor}')
if NOISE:
noise_model = get_noise(MEASURE_NOISE * factor, SINGLE_GATE_DEPOLARISING * factor,
TWO_GATE_DEPOLARISING * factor, SINGLE_GATE_ERRORS, TWO_GATE_ERRORS
)
print('Processing single qubit')
results_list = []
for iteration in range(ITERATIONS):
counts = single_qubit()
error_rate, rejected, accepted, valid, invalid = process_FT_results(counts, ['0'])
results_list.append(error_rate)
mean = mean_of_list(results_list)
standard_deviation, standard_error = calculate_standard_error(results_list)
print(f'The mean is {mean:.6f}, the standard error is {standard_error:.6f}')
    print(f'The standard deviation is {standard_deviation:.6f}')
single.append(mean)
error_single.append(standard_error)
print()
print('process non FT circuit')
results_list = []
for iteration in range(ITERATIONS):
counts = encoding()
error_rate, rejected, accepted, valid, invalid = process_FT_results(counts,
codewords = ['0'],
verbose = False,
simple = SIMPLE_DECODING)
results_list.append(error_rate)
mean = mean_of_list(results_list)
standard_deviation, standard_error = calculate_standard_error(results_list)
print(f'The mean is {mean:.6f}, the standard error is {standard_error:.6f}')
    print(f'The standard deviation is {standard_deviation:.6f}')
nonft.append(mean)
error_nonft.append(standard_error)
print()
print('Processing FTb')
results_list = []
for iteration in range(ITERATIONS):
counts = encoding_FTb()
error_rate, rejected, accepted, valid, invalid = process_FT_results(counts,
codewords = ['0'],
verbose = False,
data_start = 3, data_meas_qubits = 1,
data_meas_repeats = 3,
data_meas_strings = codewords,
simple = SIMPLE_DECODING
)
results_list.append(error_rate)
mean = mean_of_list(results_list)
standard_deviation, standard_error = calculate_standard_error(results_list)
print(f'The mean is {mean:.6f}, the standard error is {standard_error:.6f}')
    print(f'The standard deviation is {standard_deviation:.6f}')
ftb.append(mean)
error_ftb.append(standard_error)
print()
print('Processing FTc')
results_list = []
for iteration in range(ITERATIONS):
counts = encoding_FTc()
error_rate, rejected, accepted, valid, invalid = process_FT_results(counts,
codewords = ['0'],
verbose = False,
data_start = 3,
data_meas_qubits = 1,
data_meas_repeats = 3,
data_meas_strings = ['0'],
simple = SIMPLE_DECODING
)
results_list.append(error_rate)
mean = mean_of_list(results_list)
standard_deviation, standard_error = calculate_standard_error(results_list)
print(f'The mean is {mean:.6f}, the standard error is {standard_error:.6f}')
    print(f'The standard deviation is {standard_deviation:.6f}')
ftc.append(mean)
error_ftc.append(standard_error)
print()
print('Processing FTd')
results_list = []
for iteration in range(ITERATIONS):
counts = encoding_FTd()
error_rate, rejected, accepted, valid, invalid = process_FT_results(counts,
codewords = ['0'],
verbose = False,
data_start = 3,
data_meas_qubits = 1,
data_meas_repeats = 3,
data_meas_strings = codewords,
simple = SIMPLE_DECODING
)
results_list.append(error_rate)
mean = mean_of_list(results_list)
standard_deviation, standard_error = calculate_standard_error(results_list)
print(f'The mean is {mean:.6f}, the standard error is {standard_error:.6f}')
    print(f'The standard deviation is {standard_deviation:.6f}')
ftd.append(mean)
error_ftd.append(standard_error)
print()
print('process FTc with FT ancilla')
results_list = []
for iteration in range(ITERATIONS):
counts = encoding_and_detection_FTc()
error_rate, rejected, accepted, valid, invalid = process_FT_results(counts,
codewords = ['0'],
anc_zero = ANC_ZERO,
anc_one = ANC_ONE,
verbose = False,
data_meas_start = DATA_MEAS_START,
data_start = DATA_START,
ancilla_qubits = ANCILLA_QUBITS,
ancilla_meas_repeats = ANCILLA_MEASUREMENT_REPEATS,
data_meas_qubits = DATA_MEAS_QUBITS,
data_meas_repeats = DATA_MEASUREMENT_REPEATS,
simple = SIMPLE_DECODING
)
results_list.append(error_rate)
mean = mean_of_list(results_list)
standard_deviation, standard_error = calculate_standard_error(results_list)
print(f'The mean is {mean:.6f}, the standard error is {standard_error:.6f}')
    print(f'The standard deviation is {standard_deviation:.6f}')
ftca.append(mean)
error_ftca.append(standard_error)
print()
print('process non FT circuit with ancilla')
results_list = []
for iteration in range(ITERATIONS):
counts = encoding_with_ancilla()
error_rate, rejected, accepted, valid, invalid = process_FT_results(counts,
codewords = ['0'],
verbose = False,
data_start = 2,
ancilla_qubits = 1,
simple = SIMPLE_DECODING
)
results_list.append(error_rate)
mean = mean_of_list(results_list)
standard_deviation, standard_error = calculate_standard_error(results_list)
print(f'The mean is {mean:.6f}, the standard error is {standard_error:.6f}')
    print(f'The standard deviation is {standard_deviation:.6f}')
nonfta.append(mean)
error_nonfta.append(standard_error)
print()
color1 = 'black'
color2 = '#045257'
color3 = '#089099'
color4 = '#7CCBA2'
color5 = '#F0746F'
color6 = '#7C1D6F'
color7 = '#DC3977'
plt.plot(FACTOR_LIST, single, '.', color = color1, linestyle = '', label = 'Single qubit' )
plt.plot(FACTOR_LIST, ftb, '.', color = color2, linestyle = '', label = 'FT encoding B' )
plt.plot(FACTOR_LIST, ftc, '.', color = color3, linestyle = '', label = 'FT encoding C' )
plt.plot(FACTOR_LIST, ftd, '.', color = color4, linestyle = '', label = 'FT encoding D' )
plt.plot(FACTOR_LIST, nonft, '.', color = color5, linestyle = '', label = 'Non FT encoding' )
plt.plot(FACTOR_LIST, ftca, '.', color = color6, linestyle = '', label = 'FT encoding C + FT ancilla' )
plt.plot(FACTOR_LIST, nonfta, '.', color = color7, linestyle = '', label = 'Non FT encoding + ancilla' )
plt.errorbar(FACTOR_LIST, single, yerr = error_single, color = color1 )
plt.errorbar(FACTOR_LIST, ftb, yerr = error_ftb, color = color2 )
plt.errorbar(FACTOR_LIST, ftc, yerr = error_ftc, color = color3 )
plt.errorbar(FACTOR_LIST, ftd, yerr = error_ftd, color = color4 )
plt.errorbar(FACTOR_LIST, nonft, yerr = error_nonft, color = color5 )
plt.errorbar(FACTOR_LIST, ftca, yerr = error_ftca, color = color6 )
plt.errorbar(FACTOR_LIST, nonfta, yerr = error_nonfta, color = color7 )
plt.xlabel('Error scaling factor')
plt.ylabel('Error rate')
plt.title(TITLE)
plt.xscale("log")
plt.yscale("log")
plt.legend(bbox_to_anchor=(0, 1), ncol = 2, loc = 2, borderaxespad = 0., prop={"size":8})
fname = TITLE + '.png'
plt.savefig(fname)
###Output
_____no_output_____ |
ECE314/lab0/Lab 0.ipynb | ###Markdown
Lab 0: Getting Started with Python for ECE 314 This is the first half of Lab 1 for *ECE 314 Probability in Engineering Lab*. We post it in case you would like to learn a bit about Python in advance of taking the course. At this point in your academic careers you should have some knowledge of object-oriented computer programming. It would certainly help if you've had experience with Python, but if not, have no fear. Python is a very intuitive programming language. If you've coded in C, Java, or Matlab you should have no trouble learning Python. Before we get too far into the code, we present a few general notions of what the environment will look like. IPython Notebook: The computer you are using to read this file probably has installed on it the Jupyter Notebook App or a similar application to read IPython version 4 notebooks. We also assume the notebooks are run using Python version 2.7XX rather than version 3.4XX. For more information on installation or using an engineering work station (EWS) Linux machine, see instructions on the course webpage. An IPython Notebook file (with extension .ipynb) is an accumulation of cells, each composed of either code or markdown (i.e., text). Each code cell is individually executable. Each markdown cell can contain (among many things) LaTeX and HTML. Throughout each lab you will be shown examples of code, probability theory, and coding applications. You will need to be able to modify this file to include your own answers and edits. Each of the questions is numbered in bold and we ask that you put all your responses/code in cells just after the stated questions. Let's go over some of the basics: Running a Cell: While the file is running, one cell has the focus. To run the cell that is the current focus you can press the play button in the toolbar or use the shortcut SHIFT-ENTER. You will notice it brings the focus to the next cell after it completes. To run and keep focus in the same cell, you can use CTRL-ENTER. The order the cells run in can be important. In these labs the order will always go from top to bottom. In order to run code in the middle of the lab you may need to have run the code in a cell prior to it. Stopping a Cell: There may come times when a particular section of code is causing errors or running an infinite loop. You may need to interrupt the cell from running. To do this simply click the stop button in the toolbar or use the shortcut CTRL-C. Creating a Cell: A new cell can be created using the Insert tab at the top of the page. It will default to be a code type. You can change the cell type of any cell by clicking on it and then using the Cell tab at the top of the page. For normal text, use the "markdown" type. It allows you to use HTML and LaTeX as well. Clearing Output: If your screen becomes too busy, it may be useful to be able to clear output. This can be done again from the Cell tab under "All Output". The program is still running, but has been reset. Saving Your File: There is an autosave that can be set to save your file at a given rate (default is to save once every two minutes). If you prefer saving on your own you can use the File tab or the CTRL-S shortcut. A handy feature, also under the File tab, is that you can revert to a previously saved checkpoint. Keyboard Shortcuts: It can be useful to learn the keyboard shortcuts for these. They allow you to insert cells, run code, and clear code at a much quicker pace. 
The list can be displayed by typing Ctrl-m h, and can be found here: http://ipython.org/ipython-doc/rel-1.1.0/interactive/notebook.html LaTex and Math: In these labs, you will be asked a number of questions, some requiring typed answers in a markdown cell, others requiring python answers in a code cell. It may be useful to learn LaTex to better explain yourself in mathematical terms. LaTex for the formulation of mathematical equations is very intuitive and can be picked up easily. For a reference, look here: https://www.artofproblemsolving.com/wiki/index.php/LaTeX:Symbols Introduction to Python Code Importing Modules Python is an object oriented programming language where the user has access to functions through imported packages. A package is a collection of modules in directories that have a hierarchy. The three most common packages that we will use in this course are numpy, scipy, and matplotlib, though we will pick up others along the way. Before you can use any of these, you must import them. You only need to import them once in an IPython Notebook file, and then any cell in the notebook can have access to them. Running the code below imports all the pakages you will need for this lab. The simple print statement lets you know when it's completed.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import scipy.stats as st
print ("Modules Imported!")
###Output
Modules Imported!
###Markdown
The first line is slightly different than the others and uses what is known as a "magic" function. This particular "magic" function simply makes it so that the plots we generate with the matplotlib package occur inline as opposed to opening in new windows outside of the notebook. Basic Math Python is very similar to Matlab and can be used to solve numerical problems. We simply need to run an expression and it will output an answer.
###Code
3+4*2
###Output
_____no_output_____
###Markdown
We can also create a variable, set it equal to an expression, and print the value.
###Code
x = 3+4**2
print (x)
###Output
_____no_output_____
###Markdown
We used ** to represent an exponent. Similarly, we can take the square root of a number this way. Here is an attempt:
###Code
3+4**(1/2)
###Output
_____no_output_____
###Markdown
You should get the answer 5 if running Python 3.x. Under Python 2.7, the division of integers 1/2 would return 0 and the final output would be 4. That could be corrected by changing 1/2 to 1./2; a short illustrative snippet of this division behaviour follows. Python handles lists very similarly to Matlab. We can set variables equal to lists and perform operations on them. We can change the contents of a list (this is called being mutable), and the elements don't need to be of the same type. Note that Python indexes starting with 0, as shown in the list example below.
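Here is a quick check of the division behaviour just described (this extra snippet is illustrative; the values in the comments are what a Python 3 interpreter prints):

```python
print(1/2)    # 0.5 in Python 3 (true division); 0 under Python 2.7
print(1//2)   # 0 (floor division in both versions)
print(1./2)   # 0.5 (forcing a float works in both versions)
```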
###Code
x = [1,2,3,4,5]
y = [6,7,8,9,10]
print (x, y)
x[0] = 'Dog'
print (x[0])
###Output
_____no_output_____
###Markdown
Python also has what is known as a tuple. A tuple is very similar to a list, but is immutable. We cannot change the contents of the tuple. Tuples are often used to input or return objects. Below is the same code as above, but with tuples. It gives us an error message when we try to set x[0].
###Code
x = (1,2,3,4,5)
y = (6,7,8,9,10)
print (x, y)
x[0] = 'Dog'
print (x[0])
###Output
_____no_output_____
###Markdown
Below is a list of tuples. It has two tuples and each tuple has five elements.
###Code
x = [(1,2,3,4,5),(6,7,8,9,10)]
print (x)
print (x[0][3])
###Output
_____no_output_____
###Markdown
You may like to think of lists and tuples as arrays in some sense, but try to keep them separate. An array is actually an object from the NumPy module. We'll go over them a little bit further in the lab, but there are some notable differences. Ifs, Loops, and Functions If statements in Python are like those of most other languages. You need to use a keyword (if or else), followed by a condition, and finally a colon (:). Keep in mind instead of using brackets for grouping, Python goes by indentation. In the if statement below all parts of the if statement are contained within that indentation.
###Code
x = 3
y = 1
if x>y:
print ("I")
if x>3:
print ("Hate")
else:
print ("Love")
print ("Probability")
print ("!")
###Output
I
Love
Probability
!
###Markdown
For loops use the keyword "for", followed by a variable, the keyword "in", and a certain range or vector. The same rules for indentation apply here. Recall that indexing starts at 0. The range(n) function simply creates an integer list from 0 to n-1 in whole-number increments.
###Code
x = [0,0,0,0,0]
for i in range(5):
c = 2*i**2
x[i]=c
print (x)
###Output
[0, 2, 8, 18, 32]
###Markdown
Similarly, you can use while loops. In the code below, we make use of the .append method of a list to keep adding to our list without needing to know the size initially. (By the way, a "method" is a function associated with an object. In this case, append is a method associated with a list.)
###Code
x = [0]
i = 0
while x[i]<12:
i = i+1
x.append(i)
print (x)
###Output
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
###Markdown
To specify a function, we need to use the "def" keyword. You need to name the inputs and include a return statement to end your function. Below is a function that returns the factorial of the input.
###Code
def factorial(x):
c = 1
for i in range(x,1,-1): #range(x,1,-1) creates a vector from x to 2 in -1 increments
c = c*i
return c
print (factorial(5))
###Output
_____no_output_____
###Markdown
You can also return multiple outputs. Technically, we are still returning a single object, but it is a tuple. We can unpack the tuple when we call the function. Below is a function that returns the first and last digit of any integer.
###Code
def firstNlast(x):
l = x%10 # Uses the modulus operator %
while x>0:
f = x%10
x = int(x/10)
return f,l
x = 70094921348
first, last = firstNlast(x)
print (first, last)
###Output
7 8
###Markdown
The returned items get returned as a tuple and you can individually retrieve them by setting them equal to another tuple. Using Modules One of the reasons Python is so popular is due to the building capability of the modules. Remember those files we imported initially? We have access to all of the methods they contain. We abbreviated them to shorthand signifiers so we can code more quickly. It would be impossible to give you an overview of all the useful methods because there are so many. But they are fairly intuitive, so if you think something should be a method, it's probably included. Let's start with NumPy and create an array.
###Code
x = np.array([1,2,3,4,5])
print (x)
print (x[3])
###Output
[1 2 3 4 5]
4
###Markdown
In order to access the "array" method we just needed to type our signifier "np", then a period, and then the method name. If you want a list of methods to come up as you're coding, hit tab on your keyboard after typing the period. We can similarly declare multidimensional arrays, but notice the use of brackets and indexing. Unlike lists, arrays can only contain a single type. Indexing is also done a little more intuitively (like Matlab) than it is for lists. Arrays are also mutable and can be used in multiple dimensions (to create matrices, for instance).
###Code
x = np.array([[1,2,3],[4,5,6],[7,8,9]])
print (x)
print (x[0,0])
print (x[:,1])
print (x[1,:])
###Output
[[1 2 3]
[4 5 6]
[7 8 9]]
1
[2 5 8]
[4 5 6]
###Markdown
To give you a better idea of how to use these modules, here are a number of coding examples with functions that will be particularly useful to you this semester. Below we create a function and then plot it over time. Of course we need to properly title and label the graph.
###Code
def f(t): #Creates the function that we are going to plot
return t**3-t**2+t-1
t = np.linspace(-10,10,1000) #Creates an array from -10 to 10 with 1000 points in it
plt.plot(t,f(t)) #Generates a plot of these two vectors.
plt.title('Function vs. Time')
plt.xlabel('Time(s)')
plt.ylabel('Function Value')
###Output
_____no_output_____
###Markdown
The following code is going to create a large vector of random numbers using NumPy's random function. Then it's going to plot them. It's taking the random numbers from an exponential distribution and a normal (Gaussian) distribution. These are both continuous random variables which you will learn about later in the course.
###Code
x = np.random.exponential(1,size = 100) #Generates a vector of 100 points from the exponential distribution
y = np.random.normal(size = 100) #Generates a vector of 100 points from the Normal distribution
plt.plot(x,'ro', label='exponential') #Plots x in red circles with the label exponential
plt.plot(y,'go', label = 'normal')
plt.title('Random values.')
plt.xlabel('index')
plt.ylabel('value')
plt.legend()
###Output
_____no_output_____
###Markdown
This code creates two matrices, multiplies the transpose of the first by the second, and then finds the eigenvalues:
###Code
A = np.array([(3,7,9),(4,5,1),(12,6,3)]) #Creates Matrix A
B = np.array([(1,0,3),(2,4,0),(8,3,1)]) #Creates Matrix B
A_transpose = A.T #Takes the transpose of A
C = A_transpose.dot(B) #Takes the matrix multiplication of A_transpose and B. Note using * performs a different operation on 2-d arrays
# * is the usual matrix multiplication when applied to np.matrix objects
print (np.linalg.eigvals(C)) #Uses the eigvals method under linalg under NumPy to print the eigenvalues
###Output
[149.57404656 8.88119895 16.54475449]
###Markdown
These are just the basics to be able to program in Python. I have highlighted some of what I feel are the most important functions or modules to know. For a more complete tutorial, take a look at https://docs.python.org/2.7/tutorial/index.html Creating Probability Distribution Objects for Discrete Distributions The scipy stats package contains a number of functions for using and analyzing distributions. Two of its classes are rv_discrete and rv_continous, for discrete type and for continuous type distributions, respectively. A discrete probability distribution is specified by a set of possible values, $c_1,c_2, \ldots $ and associated probabilities for the values, $p_1, p_2, \ldots $ which sum to one. The probability mass function $p$ is defined by $p(c_i)=p_i$ for all $i,$ and $p(c)=0$ for values $c$ not in the list of possible values. A random variable $X$ has such a discrete distribution if $P\{X = u\} = p(u)$ for all $u.$There are several important families of discrete probability distributions that frequently arise in applications.A very basic example is the Bernoulli distribution with parameter $p,$ where $0\leq p \leq 1.$The distribution assigns probability $p$ to value 1, and probability $1-p$ to value 0. If a random variable $X$ has theBernoulli distribution with parameter $p$, we call $X$ a Bernoulli random variable with parameter $p,$ and we write$X \sim Bernoulli(p).$ For example, if $X \sim Bernoulli(\frac{1}{4}),$ then $P\{X = 1\}=\frac{1}{4}$ and$P\{X = 0\}=1-\frac{1}{4} = \frac{3}{4}$. There is zero probability that $X$ is any value other than $1$ or $0$. The class rv_discrete within the scipy stats package is for working with general discrete type random variables, with many instances of the class corresponding to particular well known probability distribuions. It gives a convenient way to compute the mean, variance, pmf, and other attributes for a given distribution, and for generating random variates, using random number generators, with the given distribution.For example, one instance of the rv_discrete class is the object for the bernoulli distribution. By specifying (aka freezing) a value for the parameter $p$ we create a more specialized instance of a rv_discrete class. The cumulative distribution function (CDF) of a random variable $X$ is the function $F_X$ defined by $F_X(c)=P\{X\leq c\}$ for any real value of $c.$ The CDF for the$Bernoulli(\frac{1}{4})$ distribution has a jump of size 3/4 at zero and a jump of size 1/4 at one.
###Code
p = 1./4 #Sets the probability, uses decimal to create double (not integer)
bernoulli25 = st.bernoulli(p) #Generates object for Bernoulli(0.25) distribution
x = np.linspace(-2,2,1001) #Generates a vector on [-2,2] with 1001 points in it
print ('Mean:', bernoulli25.mean()) #Prints the mean (aka expected value) for the distribution
print ('Var:', bernoulli25.var()) #Prints the variance of X
plt.plot(x,bernoulli25.cdf(x)) #Creates a graph of the cumulative distribution fucntion (CDF) of X
plt.title('CDF of Bernoulli(0.25) distribution')
plt.axis([-2, 2, 0, 1.05]) # Sets the displayed ranges of x-axis and y-axis to be [-2, 2] and [0, 1.05]
###Output
Mean: 0.25
Var: 0.1875
###Markdown
Above, we were able to recreate our Bernoulli distribution through scipy.stats. **Problem 1:** Using the scipy.stats package, do the following: Print the mean and standard deviation of a Bernoulli variable where $p=\frac{14}{17}$. Create a graph of the probability mass function (pmf). (The function is zero except at zero and one. Try adapting the code in the previous cell to plot the pmf. What happens if you change np.linspace(-2,2,1001) to np.linspace(-2,2,1000)?)
###Code
########Student Answer##############
###Output
_____no_output_____ |
Keras Tutorial Happy House v2.ipynb | ###Markdown
Keras tutorial - the Happy House Welcome to the first assignment of week 2. In this assignment, you will: 1. Learn to use Keras, a high-level neural networks API (programming framework), written in Python and capable of running on top of several lower-level frameworks including TensorFlow and CNTK. 2. See how you can in a couple of hours build a deep learning algorithm. Why are we using Keras? Keras was developed to enable deep learning engineers to build and experiment with different models very quickly. Just as TensorFlow is a higher-level framework than Python, Keras is an even higher-level framework and provides additional abstractions. Being able to go from idea to result with the least possible delay is key to finding good models. However, Keras is more restrictive than the lower-level frameworks, so there are some very complex models that you can implement in TensorFlow but not (without more difficulty) in Keras. That being said, Keras will work fine for many common models. In this exercise, you'll work on the "Happy House" problem, which we'll explain below. Let's load the required packages and solve the problem of the Happy House!
###Code
import numpy as np
import keras
from keras import backend as k
from keras import layers
from keras.models import Sequential
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
%matplotlib inline
###Output
Using TensorFlow backend.
###Markdown
**Note**: As you can see, we've imported a lot of functions from Keras. You can use them easily just by calling them directly in the notebook. Ex: `X = Input(...)` or `X = ZeroPadding2D(...)`. 1 - The Happy House For your next vacation, you decided to spend a week with five of your friends from school. It is a very convenient house with many things to do nearby. But the most important benefit is that everybody has committed to be happy when they are in the house. So anyone wanting to enter the house must prove their current state of happiness. **Figure 1** : **the Happy House** As a deep learning expert, to make sure the "Happy" rule is strictly applied, you are going to build an algorithm that uses pictures from the front door camera to check if the person is happy or not. The door should open only if the person is happy. You have gathered pictures of your friends and yourself, taken by the front-door camera. The dataset is labelled. Run the following code to normalize the dataset and learn about its shapes.
###Code
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.
# Reshape
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
###Output
number of training examples = 600
number of test examples = 150
X_train shape: (600, 64, 64, 3)
Y_train shape: (600, 1)
X_test shape: (150, 64, 64, 3)
Y_test shape: (150, 1)
###Markdown
**Details of the "Happy" dataset**:- Images are of shape (64,64,3)- Training: 600 pictures- Test: 150 picturesIt is now time to solve the "Happy" Challenge. 2 - Building a model in KerasKeras is very good for rapid prototyping. In just a short time you will be able to build a model that achieves outstanding results.Here is an example of a model in Keras:```pythondef model(input_shape): Define the input placeholder as a tensor with shape input_shape. Think of this as your input image! X_input = Input(input_shape) Zero-Padding: pads the border of X_input with zeroes X = ZeroPadding2D((3, 3))(X_input) CONV -> BN -> RELU Block applied to X X = Conv2D(32, (7, 7), strides = (1, 1), name = 'conv0')(X) X = BatchNormalization(axis = 3, name = 'bn0')(X) X = Activation('relu')(X) MAXPOOL X = MaxPooling2D((2, 2), name='max_pool')(X) FLATTEN X (means convert it to a vector) + FULLYCONNECTED X = Flatten()(X) X = Dense(1, activation='sigmoid', name='fc')(X) Create model. This creates your Keras model instance, you'll use this instance to train/test the model. model = Model(inputs = X_input, outputs = X, name='HappyModel') return model```Note that Keras uses a different convention with variable names than we've previously used with numpy and TensorFlow. In particular, rather than creating and assigning a new variable on each step of forward propagation such as `X`, `Z1`, `A1`, `Z2`, `A2`, etc. for the computations for the different layers, in Keras code each line above just reassigns `X` to a new value using `X = ...`. In other words, during each step of forward propagation, we are just writing the latest value in the commputation into the same variable `X`. The only exception was `X_input`, which we kept separate and did not overwrite, since we needed it at the end to create the Keras model instance (`model = Model(inputs = X_input, ...)` above). **Exercise**: Implement a `HappyModel()`. This assignment is more open-ended than most. We suggest that you start by implementing a model using the architecture we suggest, and run through the rest of this assignment using that as your initial model. But after that, come back and take initiative to try out other model architectures. For example, you might take inspiration from the model above, but then vary the network architecture and hyperparameters however you wish. You can also use other functions such as `AveragePooling2D()`, `GlobalMaxPooling2D()`, `Dropout()`. **Note**: You have to be careful with your data's shapes. Use what you've learned in the videos to make sure your convolutional, pooling and fully-connected layers are adapted to the volumes you're applying it to.
###Code
# GRADED FUNCTION: HappyModel
def HappyModel(input_shape):
"""
Implementation of the HappyModel.
Arguments:
input_shape -- shape of the images of the dataset
Returns:
model -- a Model() instance in Keras
"""
### START CODE HERE ###
# Feel free to use the suggested outline in the text above to get started, and run through the whole
    # exercise (including the later portions of this notebook) once. Then come back and try out other
# network architectures as well.
model = Sequential()
    model.add(Conv2D(32, (7,7), padding='same', input_shape=input_shape, strides=(1,1), kernel_initializer='he_normal'))
model.add(BatchNormalization(axis = 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(2,2))
    model.add(Conv2D(32, (7,7), strides=(1,1), kernel_initializer='he_normal'))
model.add(BatchNormalization(axis = 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(2,2))
# Flatten the 3D output to 1D tensor for a fully connected layer to accept the input
model.add(Flatten())
    model.add(Dense(1, activation='sigmoid', kernel_initializer='he_normal'))  # last layer: a single sigmoid unit for binary classification
### END CODE HERE ###
model.summary()
return model
###Output
_____no_output_____
###Markdown
You have now built a function to describe your model. To train and test this model, there are four steps in Keras:1. Create the model by calling the function above2. Compile the model by calling `model.compile(optimizer = "...", loss = "...", metrics = ["accuracy"])`3. Train the model on train data by calling `model.fit(x = ..., y = ..., epochs = ..., batch_size = ...)`4. Test the model on test data by calling `model.evaluate(x = ..., y = ...)`If you want to know more about `model.compile()`, `model.fit()`, `model.evaluate()` and their arguments, refer to the official [Keras documentation](https://keras.io/models/model/).**Exercise**: Implement step 1, i.e. create the model.
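For orientation, here is how the four steps chain together. This is only a hedged sketch (the hyperparameters are illustrative placeholders, not the graded solution), assuming `HappyModel` and the data loaded above:

```python
# 1. Create the model (input images are 64x64x3)
happyModel = HappyModel((64, 64, 3))

# 2. Compile it for binary classification
happyModel.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])

# 3. Train on the training set
happyModel.fit(x=X_train, y=Y_train, epochs=10, batch_size=16)

# 4. Evaluate on the held-out test set
loss, acc = happyModel.evaluate(x=X_test, y=Y_test)
print("Test loss:", loss, "Test accuracy:", acc)
```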
###Code
### START CODE HERE ### (1 line)
happyModel = HappyModel((64,64,3))
### END CODE HERE ###
###Output
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d_35 (Conv2D) (None, 64, 64, 32) 4736
_________________________________________________________________
batch_normalization_34 (Batc (None, 64, 64, 32) 128
_________________________________________________________________
activation_33 (Activation) (None, 64, 64, 32) 0
_________________________________________________________________
max_pooling2d_33 (MaxPooling (None, 32, 32, 32) 0
_________________________________________________________________
conv2d_36 (Conv2D) (None, 26, 26, 32) 50208
_________________________________________________________________
batch_normalization_35 (Batc (None, 26, 26, 32) 128
_________________________________________________________________
activation_34 (Activation) (None, 26, 26, 32) 0
_________________________________________________________________
max_pooling2d_34 (MaxPooling (None, 13, 13, 32) 0
_________________________________________________________________
flatten_14 (Flatten) (None, 5408) 0
_________________________________________________________________
dense_23 (Dense) (None, 1) 5409
=================================================================
Total params: 60,609
Trainable params: 60,481
Non-trainable params: 128
_________________________________________________________________
###Markdown
**Exercise**: Implement step 2, i.e. compile the model to configure the learning process. Choose the 3 arguments of `compile()` wisely. Hint: the Happy Challenge is a binary classification problem.
###Code
### START CODE HERE ### (1 line)
happyModel.compile(loss='binary_crossentropy', optimizer='Adam',metrics = ['accuracy'])
### END CODE HERE ###
###Output
_____no_output_____
###Markdown
**Exercise**: Implement step 3, i.e. train the model. Choose the number of epochs and the batch size.
###Code
### START CODE HERE ### (1 line)
happyModel.fit(X_train, Y_train,
epochs = 10,
batch_size = 1,
verbose = 1)
### END CODE HERE ###
# let's run for another 30 epochs
happyModel.fit(X_train, Y_train,
epochs = 30,
batch_size = 100,
verbose = 1)
### END CODE HERE ###
###Output
Epoch 1/30
600/600 [==============================] - 25s - loss: 0.0305 - acc: 0.9850
Epoch 2/30
600/600 [==============================] - 24s - loss: 0.0230 - acc: 0.9917
Epoch 3/30
600/600 [==============================] - 25s - loss: 0.0180 - acc: 0.9967
Epoch 4/30
600/600 [==============================] - 24s - loss: 0.0157 - acc: 0.9983
Epoch 5/30
600/600 [==============================] - 22s - loss: 0.0149 - acc: 0.9983
Epoch 6/30
600/600 [==============================] - 22s - loss: 0.0144 - acc: 0.9983
Epoch 7/30
600/600 [==============================] - 23s - loss: 0.0141 - acc: 0.9983
Epoch 8/30
600/600 [==============================] - 24s - loss: 0.0137 - acc: 0.9983
Epoch 9/30
600/600 [==============================] - 22s - loss: 0.0136 - acc: 0.9983
Epoch 10/30
600/600 [==============================] - 22s - loss: 0.0133 - acc: 0.9983
Epoch 11/30
600/600 [==============================] - 22s - loss: 0.0130 - acc: 0.9983
Epoch 12/30
600/600 [==============================] - 22s - loss: 0.0128 - acc: 0.9983
Epoch 13/30
600/600 [==============================] - 23s - loss: 0.0124 - acc: 0.9983
Epoch 14/30
600/600 [==============================] - 21s - loss: 0.0123 - acc: 0.9983
Epoch 15/30
600/600 [==============================] - 21s - loss: 0.0124 - acc: 0.9983
Epoch 16/30
600/600 [==============================] - 22s - loss: 0.0119 - acc: 0.9983
Epoch 17/30
600/600 [==============================] - 23s - loss: 0.0116 - acc: 0.9983
Epoch 18/30
600/600 [==============================] - 24s - loss: 0.0117 - acc: 0.9983
Epoch 19/30
600/600 [==============================] - 25s - loss: 0.0115 - acc: 0.9983
Epoch 20/30
600/600 [==============================] - 28s - loss: 0.0114 - acc: 0.9983
Epoch 21/30
600/600 [==============================] - 28s - loss: 0.0113 - acc: 0.9983
Epoch 22/30
600/600 [==============================] - 29s - loss: 0.0112 - acc: 0.9983
Epoch 23/30
600/600 [==============================] - 29s - loss: 0.0111 - acc: 0.9983
Epoch 24/30
600/600 [==============================] - 25s - loss: 0.0109 - acc: 0.9983
Epoch 25/30
600/600 [==============================] - 24s - loss: 0.0108 - acc: 0.9983
Epoch 26/30
600/600 [==============================] - 23s - loss: 0.0109 - acc: 0.9983
Epoch 27/30
600/600 [==============================] - 23s - loss: 0.0106 - acc: 0.9983
Epoch 28/30
600/600 [==============================] - 24s - loss: 0.0104 - acc: 0.9983
Epoch 29/30
600/600 [==============================] - 23s - loss: 0.0104 - acc: 0.9983
Epoch 30/30
600/600 [==============================] - 24s - loss: 0.0104 - acc: 0.9983
###Markdown
Note that if you run `fit()` again, the `model` will continue to train with the parameters it has already learnt instead of reinitializing them.**Exercise**: Implement step 4, i.e. test/evaluate the model.
###Code
### START CODE HERE ### (1 line)
preds = happyModel.evaluate(x=X_test, y=Y_test)
### END CODE HERE ###
print()
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))
###Output
Loss = [ 0.99996006]
Test Accuracy = [ 0.9992587]
###Markdown
If your `happyModel()` function worked, you should have observed much better than random-guessing (50%) accuracy on the train and test sets.To give you a point of comparison, our model gets around **95% test accuracy in 40 epochs** (and 99% train accuracy) with a mini batch size of 16 and "adam" optimizer. But our model gets decent accuracy after just 2-5 epochs, so if you're comparing different models you can also train a variety of models on just a few epochs and see how they compare. If you have not yet achieved a very good accuracy (let's say more than 80%), here're some things you can play around with to try to achieve it:- Try using blocks of CONV->BATCHNORM->RELU such as:

```python
X = Conv2D(32, (3, 3), strides = (1, 1), name = 'conv0')(X)
X = BatchNormalization(axis = 3, name = 'bn0')(X)
X = Activation('relu')(X)
```

until your height and width dimensions are quite low and your number of channels quite large (≈32 for example). You are encoding useful information in a volume with a lot of channels. You can then flatten the volume and use a fully-connected layer.- You can use MAXPOOL after such blocks. It will help you lower the dimension in height and width.- Change your optimizer. We find Adam works well. - If the model is struggling to run and you get memory issues, lower your batch_size (12 is usually a good compromise)- Run on more epochs, until you see the train accuracy plateauing. Even if you have achieved a good accuracy, please feel free to keep playing with your model to try to get even better results. **Note**: If you perform hyperparameter tuning on your model, the test set actually becomes a dev set, and your model might end up overfitting to the test (dev) set. But just for the purpose of this assignment, we won't worry about that here. 3 - ConclusionCongratulations, you have solved the Happy House challenge! Now, you just need to link this model to the front-door camera of your house. We unfortunately won't go into the details of how to do that here. **What we would like you to remember from this assignment:**- Keras is a tool we recommend for rapid prototyping. It allows you to quickly try out different model architectures. Are there any applications of deep learning to your daily life that you'd like to implement using Keras? - Remember how to code a model in Keras and the four steps leading to the evaluation of your model on the test set. Create->Compile->Fit/Train->Evaluate/Test. 4 - Test with your own image (Optional)Congratulations on finishing this assignment. You can now take a picture of your face and see if you could enter the Happy House. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Write your image's name in the following code 4. Run the code and check if the algorithm is right (0 is unhappy, 1 is happy)! The training/test sets were quite similar; for example, all the pictures were taken against the same background (since a front door camera is always mounted in the same position). This makes the problem easier, but a model trained on this data may or may not work on your own data. But feel free to give it a try!
###Code
### START CODE HERE ###
img_path = 'images/my_image.jpg'
### END CODE HERE ###
img = image.load_img(img_path, target_size=(64, 64))
imshow(img)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
print(happyModel.predict(x))
###Output
[[ 0.]]
###Markdown
5 - Other useful functions in Keras (Optional)Two other basic features of Keras that you'll find useful are:- `model.summary()`: prints the details of your layers in a table with the sizes of its inputs/outputs- `plot_model()`: plots your graph in a nice layout. You can even save it as ".png" using SVG() if you'd like to share it on social media ;). It is saved in "File" then "Open..." in the upper bar of the notebook.Run the following code.
###Code
happyModel.summary()
plot_model(happyModel, to_file='HappyModel.png')
SVG(model_to_dot(happyModel).create(prog='dot', format='svg'))
###Output
_____no_output_____ |
a6_w3_ex1.ipynb | ###Markdown
This notebook is designed to run in an IBM Watson Studio default runtime (NOT the Watson Studio Apache Spark Runtime, as the default runtime with 1 vCPU is free of charge). Therefore, we install Apache Spark in local mode for test purposes only. Please don't use it in production. In case you are facing issues, please read the following two documents first. Then, please feel free to ask: [https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all](https://coursera.org/learn/machine-learning-big-data-apache-spark/discussions/all?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0201EN-SkillsNetwork-20647446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) Please make sure to follow the guidelines before asking a question. If running outside Watson Studio, this should work as well. In case you are running in an Apache Spark context outside Watson Studio, please remove the Apache Spark setup in the first notebook cells.
###Code
from IPython.display import Markdown, display
def printmd(string):
display(Markdown('# <span style="color:red">'+string+'</span>'))
if ('sc' in locals() or 'sc' in globals()):
printmd('<<<<<!!!!! It seems that you are running in a IBM Watson Studio Apache Spark Notebook. Please run it in an IBM Watson Studio Default Runtime (without Apache Spark) !!!!!>>>>>')
!pip install pyspark==2.4.5
try:
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
except ImportError as e:
printmd('<<<<<!!!!! Please restart your kernel after installing Apache Spark !!!!!>>>>>')
sc = SparkContext.getOrCreate(SparkConf().setMaster("local[*]"))
spark = SparkSession \
.builder \
.getOrCreate()
###Output
_____no_output_____
###Markdown
Welcome to exercise one of week three of “Apache Spark for Scalable Machine Learning on BigData”. In this exercise we’ll use the HMP dataset again and perform some basic operations using Apache SparkML Pipeline components.Let’s create our DataFrame again:
###Code
# delete files from previous runs
!rm -f hmp.parquet*
# download the file containing the data in PARQUET format
!wget https://github.com/IBM/coursera/raw/master/hmp.parquet
# create a dataframe out of it
df = spark.read.parquet('hmp.parquet')
# register a corresponding query table
df.createOrReplaceTempView('df')
###Output
--2020-12-09 04:24:15-- https://github.com/IBM/coursera/raw/master/hmp.parquet
Resolving github.com (github.com)... 140.82.112.4
Connecting to github.com (github.com)|140.82.112.4|:443... connected.
HTTP request sent, awaiting response... 301 Moved Permanently
Location: https://github.com/IBM/skillsnetwork/raw/master/hmp.parquet [following]
--2020-12-09 04:24:15-- https://github.com/IBM/skillsnetwork/raw/master/hmp.parquet
Reusing existing connection to github.com:443.
HTTP request sent, awaiting response... 302 Found
Location: https://raw.githubusercontent.com/IBM/skillsnetwork/master/hmp.parquet [following]
--2020-12-09 04:24:15-- https://raw.githubusercontent.com/IBM/skillsnetwork/master/hmp.parquet
Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 151.101.128.133, 151.101.192.133, 151.101.0.133, ...
Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|151.101.128.133|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 932997 (911K) [application/octet-stream]
Saving to: ‘hmp.parquet’
hmp.parquet 100%[===================>] 911.13K 4.55MB/s in 0.2s
2020-12-09 04:24:16 (4.55 MB/s) - ‘hmp.parquet’ saved [932997/932997]
###Markdown
Given below is the feature engineering pipeline from the lecture. Please add a feature column called “features_minmax” using the MinMaxScaler.More information can be found here:[http://spark.apache.org/docs/latest/ml-features.htmlminmaxscaler](http://spark.apache.org/docs/latest/ml-features.htmlminmaxscaler?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0201EN-SkillsNetwork-20647446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0201EN-SkillsNetwork-20647446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)
###Code
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, Normalizer, MinMaxScaler
from pyspark.ml.linalg import Vectors
from pyspark.ml import Pipeline
indexer = StringIndexer(inputCol="class", outputCol="classIndex")
encoder = OneHotEncoder(inputCol="classIndex", outputCol="categoryVec")
vectorAssembler = VectorAssembler(inputCols=["x","y","z"],
outputCol="features")
normalizer = Normalizer(inputCol="features", outputCol="features_norm", p=1.0)
minmaxscaler = MinMaxScaler(inputCol="features", outputCol="features_minmax")
pipeline = Pipeline(stages=[indexer, encoder, vectorAssembler, normalizer,minmaxscaler])
model = pipeline.fit(df)
prediction = model.transform(df)
prediction.show()
###Output
_____no_output_____
###Markdown
The difference between a transformer and an estimator is state. A transformer is stateless whereas an estimator keeps state. Therefore “VectorAssembler” is a transformer since it only needs to read the data row by row. “MinMaxScaler”, on the other hand, needs to compute statistics on the whole dataset first, therefore it is an estimator. An estimator has an additional “fit” function. “OneHotEncoder” has been deprecated in Spark 2.3, therefore please change the code below to use the OneHotEncoderEstimator instead of the “OneHotEncoder”. More information can be found here:[http://spark.apache.org/docs/latest/ml-features.htmlonehotencoderestimator](http://spark.apache.org/docs/latest/ml-features.htmlonehotencoderestimator?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0201EN-SkillsNetwork-20647446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-ML0201EN-SkillsNetwork-20647446&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ)
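For reference, a minimal hedged sketch of that change, assuming Spark 2.3+ where the estimator variant is available (note that it takes *lists* of column names):

```python
from pyspark.ml.feature import OneHotEncoderEstimator

# drop-in replacement for the deprecated OneHotEncoder stage used in the cell below
encoder = OneHotEncoderEstimator(inputCols=["classIndex"], outputCols=["categoryVec"])
```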
###Code
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler, Normalizer, MinMaxScaler, OneHotEncoderEstimator
from pyspark.ml.linalg import Vectors
from pyspark.ml import Pipeline
indexer = StringIndexer(inputCol="class", outputCol="classIndex")
encoder = OneHotEncoder(inputCol="classIndex", outputCol="categoryVec")
vectorAssembler = VectorAssembler(inputCols=["x","y","z"],
outputCol="features")
normalizer = Normalizer(inputCol="features", outputCol="features_norm", p=1.0)
pipeline = Pipeline(stages=[indexer, encoder, vectorAssembler, normalizer])
model = pipeline.fit(df)
prediction = model.transform(df)
prediction.show()
###Output
_____no_output_____ |
workbook/hasanPackage/Assignment_8_correction.ipynb | ###Markdown
Assignment 8 1. Create an array of shape 10x5x3 with all values equal to zero 2. Create another array with values 255 and shape 10x5x3 3. Join the 2 arrays horizontally 4. Create an array of random unsigned 8-bit integers with shape (50x50x3) 5. Convert the top part to red (255,0,0) and the bottom part to green (0,255,0) 6. The seed value is 11 7. Display the data
###Code
import numpy as np
import matplotlib.pyplot as plt
# Create an array of shape 10x5x3 with all values equal to zero
a0 = np.zeros((10,5,3), dtype='uint8')
plt.imshow(a0)
plt.title("Array of 0")
plt.show()
# Create another array with values 255 and shape 10x5x3
a255 = np.full((10,5,3), 255, dtype='uint8')  # uint8 so imshow renders it as RGB data
plt.imshow(a255)
plt.title("Array of 255")
plt.show()
# Join the 2 arrays horizontally
h_joined = np.hstack((a0, a255))
plt.imshow(h_joined)
plt.title("Horizontally Joined Array")
plt.show()
# Create an array of random unsigned integer 8 bits with shape (50x50x3)
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(11)
b = np.random.randint(0, 256, (50, 50, 3),dtype='uint8')
plt.imshow(b)
plt.title('Original Array')
plt.show()
# 5. Convert top part to red (255,0,0) and bottom part to green (0,255,0)
# the original array height is 50, so we split it into a top half (rows 0-24)
# and a bottom half (rows 25-49), i.e. 25 rows each
# get the top and bottom part
top = b[:25,:,:]
bottom = b[25:,:,:]
plt.title('Top')
plt.imshow(top)
plt.show()
plt.title('Bottom')
plt.imshow(bottom)
plt.show()
# we are replacing top and bottom with red and green respectively.
# first we create red and green arrays with the same size as the two parts.
# both are a 25x50x3 array
red = np.full((25,50,3), [255,0,0], dtype='uint8')
green = np.full((25,50,3), [0,255,0], dtype='uint8')
# now we assign the red and green arrays to replace the original values of top and bottom
top = red
bottom = green
# re-join top and bottom for use in the next question
b_red_green = np.vstack((top,bottom))
plt.imshow(b_red_green)
plt.title('Top and bottom replaced with red and green')
plt.show()
###Output
_____no_output_____ |
2.9 R Data Visualisation/de-DE/2.9.47 R - ggplot2 Scatterplots.ipynb | ###Markdown
Day 2. Chapter 9. R Data & Visualisation Lesson 47. Scatterplots with ggplot2 Scatterplots allow us to visualize points along two axes and thus examine correlations in the dataset. In this lesson we look at how to do this with ggplot. We use the already familiar mtcars dataset:
###Code
library('ggplot2')
df <- mtcars
head(df)
###Output
_____no_output_____
###Markdown
qplot()
###Code
qplot(wt,mpg,data=df)
###Output
_____no_output_____
###Markdown
Adding a third feature We can add a third aspect or dimension to each point through a color gradient. Alternatively, we can adjust the size of the points based on this third feature. For example:
###Code
qplot(wt,mpg,data=df,color=cyl)
qplot(wt,mpg,data=df,size=cyl)
###Output
_____no_output_____
###Markdown
Or both
###Code
qplot(wt,mpg,data=df,size=gear,color=cyl)
# Show 4 dimensions
qplot(wt,mpg,data=df,size=cyl,color=hp,alpha=0.6)
###Output
_____no_output_____
###Markdown
ggplot() Let us now look at how ggplot() gives us more control:
###Code
pl <- ggplot(data=df,aes(x = wt,y=mpg))
pl + geom_point()
###Output
_____no_output_____
###Markdown
Adding a third feature
###Code
pl <- ggplot(data=df,aes(x = wt,y=mpg))
pl + geom_point(aes(color=cyl))
pl <- ggplot(data=df,aes(x = wt,y=mpg))
pl + geom_point(aes(color=factor(cyl)))
pl <- ggplot(data=df,aes(x = wt,y=mpg))
pl + geom_point(aes(size=factor(cyl)))
# With shapes
pl <- ggplot(data=df,aes(x = wt,y=mpg))
pl + geom_point(aes(shape=factor(cyl)))
# Better version
# with shapes
pl <- ggplot(data=df,aes(x = wt,y=mpg))
pl + geom_point(aes(shape=factor(gear),color=factor(cyl)),size=4,alpha=0.6)
###Output
_____no_output_____
###Markdown
Color gradients
###Code
pl + geom_point(aes(colour = hp),size=4) + scale_colour_gradient(high='red',low = "blue")
###Output
_____no_output_____ |
one_million/One-Million All-Word Data-hierarchical Sampling-Fine.ipynb | ###Markdown
Training and Validation data
###Code
train_val_data('lex', 3, index1, split_label1, data_label1, sense_count1, [], lex_cond=False, pos_cond=True)
train_val_data('sense', 4, index2, split_label2, data_label2, sense_count2, [], lex_cond=True, pos_cond=True)
train_val_data('full_sense', 5, index3, split_label3, data_label3, sense_count3, [], lex_cond=True, pos_cond=True)
###Output
/users/btech/aviraj/envs/lib/python3.5/site-packages/sklearn/model_selection/_split.py:2026: FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified.
FutureWarning)
###Markdown
Test data
###Code
test_data('lex', 3, test_index1, data_test_label1, sense_count1, lex_cond=False, pos_cond=True)
test_data('sense', 4, test_index2, data_test_label2, sense_count2, lex_cond=True, pos_cond=True)
test_data('full_sense', 5, test_index3, data_test_label3, sense_count3, lex_cond=True, pos_cond=True)
sampled_sense_count1 = [('1:19', 10000),
('1:17', 10000),
('2:34', 10000),
('2:33', 10000),
('1:27', 10000),
('2:37', 8000),
('1:24', 8000),
('1:08', 8000),
('1:12', 7000),
('1:22', 5000),
('2:29', 5000),
('1:05', 3000),
('1:16', 3000),
('1:25', 3000),
('1:20', 3000),
('1:13', 2000)]
sampled_sense_count2= []
for s, c in sense_count2[120:]:
sampled_sense_count2.append((s, 5000))
for s, c in sense_count2[75:120]:
sampled_sense_count2.append((s, 8000))
for s, c in sense_count2[25:75]:
sampled_sense_count2.append((s, 12000))
sampled_sense_count3= []
for s, c in sense_count3[130:]:
sampled_sense_count3.append((s, 5000))
for s, c in sense_count3[70:130]:
sampled_sense_count3.append((s, 8000))
for s, c in sense_count3[25:70]:
sampled_sense_count3.append((s, 12000))
train_val_data('lex_sampled', 3, index1, split_label1, data_label1, sense_count1, sampled_sense_count1, lex_cond=False, pos_cond=True, sampling=True)
train_val_data('sense_sampled', 4, index2, split_label2, data_label2, sense_count2, sampled_sense_count2, lex_cond=True, pos_cond=True, sampling=True)
train_val_data('full_sense_sampled', 5, index3, split_label3, data_label3, sense_count3, sampled_sense_count3, lex_cond=True, pos_cond=True, sampling=True)
###Output
/home/sshanukr/env/lib/python3.5/site-packages/sklearn/model_selection/_split.py:2026: FutureWarning: From version 0.21, test_size will always complement train_size unless both are specified.
FutureWarning)
|
Documentation/SalaryGrowth_GenderPerYear.ipynb | ###Markdown
Glass Ceiling - A Perspective on Earning Salary Over the Last 15 Years Objective: To show the differences in the salary-earning population for Mexican women during the last 15 years. After our data exploration, we narrowed it down to three data sources, which are:* **Population by gender and earned salary.*** Employed population by formality of economic activity Cleaning Sources Salary per Gender over the last 15 years The further analysis will try to make a statement about how women's economic growth has been slower than men's in Mexico.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
We import our sources:
###Code
asalariados=pd.read_csv('Porcentaje_de_Asalariados_que_Ganan_hasta_tres_Salarios_Minimos.csv')
###Output
_____no_output_____
###Markdown
So first let's replace ND with NaN
###Code
asalariados=asalariados.replace('ND',np.NaN)
###Output
_____no_output_____
###Markdown
And drop rows with too many missing values; e.g. we keep only rows with at least 6 non-null values:
###Code
asalariados=asalariados.dropna(thresh=6)
asalariados.head()
asalariados.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 126 entries, 0 to 127
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Periodo 126 non-null object
1 Trimestre 126 non-null float64
2 Entidad_Federativa 126 non-null object
3 Sexo 126 non-null object
4 Asalariados que ganan hasta 3 salarios mínimos 126 non-null object
5 Asalariados que reportan ingresos 126 non-null object
6 Porcentaje de asalariados que ganan hasta tres salarios mínimos 126 non-null object
dtypes: float64(1), object(6)
memory usage: 7.9+ KB
###Markdown
Let's convert the `dtypes` of the `asalariados` dataframe accordingly.
###Code
asalariado=asalariados.convert_dtypes()
asalariado.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 126 entries, 0 to 127
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Periodo 126 non-null string
1 Trimestre 126 non-null Int64
2 Entidad_Federativa 126 non-null string
3 Sexo 126 non-null string
4 Asalariados que ganan hasta 3 salarios mínimos 126 non-null string
5 Asalariados que reportan ingresos 126 non-null string
6 Porcentaje de asalariados que ganan hasta tres salarios mínimos 126 non-null string
dtypes: Int64(1), string(6)
memory usage: 8.0 KB
###Markdown
And `to_numeric()`:
###Code
asalariados['Asalariados que ganan hasta 3 salarios mínimos']=pd.to_numeric(asalariados['Asalariados que ganan hasta 3 salarios mínimos'])
asalariados['Asalariados que reportan ingresos']=pd.to_numeric(asalariados['Asalariados que reportan ingresos'])
asalariados['Porcentaje de asalariados que ganan hasta tres salarios mínimos']=pd.to_numeric(asalariados['Porcentaje de asalariados que ganan hasta tres salarios mínimos'])
asalariados.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 126 entries, 0 to 127
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Periodo 126 non-null object
1 Trimestre 126 non-null float64
2 Entidad_Federativa 126 non-null object
3 Sexo 126 non-null object
4 Asalariados que ganan hasta 3 salarios mínimos 126 non-null int64
5 Asalariados que reportan ingresos 126 non-null int64
6 Porcentaje de asalariados que ganan hasta tres salarios mínimos 126 non-null float64
dtypes: float64(2), int64(2), object(3)
memory usage: 7.9+ KB
###Markdown
Now that we have the `asalariados` dataframe cleaned, let's start by analysing the change in the earning population by gender over the last years.
###Code
asalariados['Periodo'].unique()
###Output
_____no_output_____
###Markdown
Population by gender every 5 years for the last 15 years
###Code
poblacion=pd.read_csv('Poblacion_02.csv')
# Sources:
#INEGI. II Conteo de Población y Vivienda 2005.
#INEGI. Censo de Población y Vivienda 2010.
#INEGI. Censo de Población y Vivienda 2020.
# Data for the population aged 15 and over, to better represent the working population
poblacion
poblacion.set_index('Periodo')
# Visually compare the population by gender:
ax0 = poblacion.groupby(['Periodo', 'Género']).sum()['Total'].unstack()
# generate the plot:
ax0.plot(kind="bar",figsize =(20, 8),colormap='PiYG')
###Output
_____no_output_____
###Markdown
If we express the differences as percentages of the total population:
###Code
popM = (
poblacion.loc[poblacion['Género']=='Mujeres']
.groupby('Periodo')
.agg({'Total':'sum'})
.reset_index()
)
popM.set_index('Periodo')
popM=popM.rename(columns = {'Total':'Mujeres'})
popH = (
poblacion.loc[poblacion['Género']=='Hombres']
.groupby('Periodo')
.agg({'Total':'sum'})
.reset_index()
)
popH.set_index('Periodo')
popH=popH.rename(columns = {'Total':'Hombres'})
nPop=pd.concat(
[
popM
,popH.iloc[:,1]
]
,axis=1,ignore_index=False)
nPop=nPop.set_index('Periodo')
nPop['Total']=nPop['Mujeres']+nPop['Hombres']
nPop['%Mujeres']=np.round(nPop['Mujeres']/nPop['Total']*100,2)
nPop['%Hombres']=np.round(nPop['Hombres']/nPop['Total']*100,2)
nPop
np.round(nPop['%Mujeres'].mean(),2)
###Output
_____no_output_____
###Markdown
People who report income per year We have continuous data for the last 15 years, up to 2020.
###Code
# get the number of wage earners reporting income each year, grouped by sex:
ax1 = asalariados.groupby(['Periodo', 'Sexo']).sum()['Asalariados que reportan ingresos'].unstack()
# generate the plot:
ax1.plot(kind="bar",figsize =(20, 8),colormap='PiYG')
###Output
_____no_output_____
###Markdown
As per the graph, we can see a fairly constant growth over the years for both genders, and a big difference between the number of women and men who receive a monthly income from an economic activity. Given the following data, this is a big disparity when comparing population by gender over the last 15 years: even though women represent over 51% of the population over 15 years old, we expect to see them below that share among salary earners. Let's get the mean value of the differences. We first define a new dataframe and check the number of people reporting income per year:
###Code
salH = (
asalariados.loc[asalariados['Sexo']=='Hombres']
.groupby('Periodo')
.agg({'Asalariados que reportan ingresos':'sum'})
.reset_index()
)
salH.set_index('Periodo')
salH=salH.rename(columns = {'Asalariados que reportan ingresos':'Hombres_reportan_ingreso'})
salM = (
asalariados.loc[asalariados['Sexo']=='Mujeres']
.groupby('Periodo')
.agg({'Asalariados que reportan ingresos':'sum'})
.reset_index()
)
salM.set_index('Periodo')
salM=salM.rename(columns = {'Asalariados que reportan ingresos':'Mujeres_reportan_ingreso'})
nSalarios=pd.concat(
[
salH
,salM.iloc[:,1]
]
,axis=1,ignore_index=False)
nSalarios = nSalarios.set_index('Periodo')
nSalarios['Poblacion_Por_Año']=nSalarios['Hombres_reportan_ingreso']+nSalarios['Mujeres_reportan_ingreso']
nSalarios['Diferencia_HvsM']=nSalarios['Hombres_reportan_ingreso']-nSalarios['Mujeres_reportan_ingreso']
nSalarios['%Diferencia']=np.round(nSalarios['Diferencia_HvsM']/nSalarios['Poblacion_Por_Año']*100,2)
nSalarios
###Output
_____no_output_____
###Markdown
Comparing the mean percentage difference in people who report income across the years:
###Code
nSalarios['%Diferencia'].mean()
###Output
_____no_output_____
###Markdown
As expected, we can see that men report income **23.25% more** than women on average. Percentage of population who reports earning income per year vs total population.
###Code
popSal=nSalarios.loc[['2005','2010','2020']]
###Output
_____no_output_____
###Markdown
We will select only the years where we have total population data; since the join initially produced an unmerged dataframe, we change the type of the indexes and turn them into columns in order to merge everything into a single dataframe.
###Code
popSal.index.map(str)
nPop.index.map(str)
popSal.reset_index(inplace=True)
nPop.reset_index(inplace=True)
popSal['Periodo']=popSal['Periodo'].map(int)
join=pd.merge(popSal, nPop, on ='Periodo', how ="inner")
join
join=join.drop(['Poblacion_Por_Año', 'Diferencia_HvsM','%Diferencia','Mujeres','Hombres','%Mujeres','%Hombres'], axis = 1)
join['%MujeresQueReportanIngreso']=np.round(join['Mujeres_reportan_ingreso']/join['Total'],2)
join['%HombresQueReportanIngreso']=np.round(join['Hombres_reportan_ingreso']/join['Total'],2)
join
np.round(join['%HombresQueReportanIngreso'].mean()*100,2)
np.round(join['%MujeresQueReportanIngreso'].mean()*100,2)
###Output
_____no_output_____
###Markdown
As expected, even though women make up over 51% of the Mexican population, men still represent 53% of the working population earning some sort of salary, more than 20 percentage points above what women represent. This not only means that men are still more present in the labour force, but also suggests that women still carry the heavier workload (considering unpaid work such as household chores; unfortunately we cannot verify this for now since no further data is available).
###Code
# get the yearly values, grouped by sex:
ax2 = asalariados.groupby(['Periodo', 'Sexo']).sum()['Asalariados que reportan ingresos'].unstack()
# generate the plot:
ax2.plot(kind="bar",figsize =(20, 8),colormap='Paired')
###Output
_____no_output_____
###Markdown
Workforce earning at most 3 minimum wages in total. We'll check the percentage growth over the years by gender; the hypothesis is that earnings for men have grown more quickly than for women. Let's explore the differences in the marginalized population by gender, i.e. those who earn at most 3 minimum wages (around 6 USD per hour by 2022).
###Code
asalH = (
asalariados.loc[asalariados['Sexo']=='Hombres']
.groupby('Periodo')
.agg({'Asalariados que ganan hasta 3 salarios mínimos':'sum'})
.reset_index()
)
asalH.set_index('Periodo')
asalH=asalH.rename(columns = {'Asalariados que ganan hasta 3 salarios mínimos':'Hombres_Que_Ganan_Hasta_3_SalMin'})
asalM = (
asalariados.loc[asalariados['Sexo']=='Mujeres']
.groupby('Periodo')
.agg({'Asalariados que ganan hasta 3 salarios mínimos':'sum'})
.reset_index()
)
asalM.set_index('Periodo')
asalM=asalM.rename(columns = {'Asalariados que ganan hasta 3 salarios mínimos':'Mujeres_Que_Ganan_Hasta_3_SalMin'})
new_asalariados=pd.concat(
[
asalH
,asalM.iloc[:,1]
]
,axis=1,ignore_index=False)
new_asalariados=new_asalariados.set_index('Periodo')
new_asalariados['TotPopPorAño']=new_asalariados['Hombres_Que_Ganan_Hasta_3_SalMin']+new_asalariados['Mujeres_Que_Ganan_Hasta_3_SalMin']
new_asalariados['Diferencia']=new_asalariados['Hombres_Que_Ganan_Hasta_3_SalMin']-new_asalariados['Mujeres_Que_Ganan_Hasta_3_SalMin']
new_asalariados['%Diferencia']=np.round(new_asalariados['Diferencia']/new_asalariados['TotPopPorAño']*100,2)
new_asalariados
np.round(new_asalariados['%Diferencia'].mean(),2)
###Output
_____no_output_____
###Markdown
On average, there are around 18% more men than women earning at most three minimum wages; still, with this difference we can't conclude much more since:* It could depend on whether, for those families, the male figure is the main source of income for the household.* We would need further analysis considering the occupation in formal and informal activities per gender.
###Code
# get the yearly values, grouped by sex:
ax2 = asalariados.groupby(['Periodo', 'Sexo']).sum()['Asalariados que ganan hasta 3 salarios mínimos'].unstack()
# generate the plot:
ax2.plot(kind="bar",figsize =(20, 8),colormap='Paired')
###Output
_____no_output_____
###Markdown
Even though we expected to see some significant difference in growth, we can see a similar trend for both genders, keeping the same gap in earnings, with a brief increase in 2011 and a brief narrowing at the beginning of the pandemic (2020). Still, the disparity is noticeable even in marginalized sectors. Percentage of workforce by gender who earn at most 3 minimum wages. Even though we previously concluded that men still represent a higher share of income earners, the following graph shows that, relative to the total population, women represent the higher percentage of those earning at most 3 minimum wages. From this, as expected, we can state how unequal earnings are between genders: even though women represent the majority of the population, their presence in the workforce is still underpaid or displaced by male figures at work.
###Code
# get the yearly values, grouped by sex:
ax2 = asalariados.groupby(['Periodo', 'Sexo']).sum()['Porcentaje de asalariados que ganan hasta tres salarios mínimos'].unstack()
# generate the plot:
ax2.plot(kind="bar",figsize =(20, 8),colormap='tab20b')
ax3 = asalariados.groupby(['Periodo','Sexo'])['Porcentaje de asalariados que ganan hasta tres salarios mínimos'].sum().unstack('Sexo').fillna(0)
ax3.plot(kind='bar', stacked=True,figsize=(20,8))
###Output
_____no_output_____ |
Exercises/draft/Exercicios_simple_email_extraction.ipynb | ###Markdown
Extracting emails from text
###Code
import requests
from bs4 import BeautifulSoup

url1 = 'http://www.dcc.ufmg.br/dcc/?q=pt-br/professores'
url2 = 'https://emap.fgv.br/pessoas'
pagina = requests.get(url1)
texto = pagina.text
print(texto[0:1000])
res1 = [candidato for candidato in texto.split() if '@' in candidato]                 # tokens that contain '@'
res2 = [candidato for candidato in res1 if 'http' not in candidato]                   # drop URLs
res3 = [candidato.replace('href="mailto:','') for candidato in res2]                  # strip mailto prefixes
res4 = [candidato.strip('"') for candidato in res3]                                   # strip surrounding quotes
res5 = [candidato for candidato in res4 if '.' in candidato[candidato.find('@'):]]    # keep tokens with a dot after '@'
res6 = [candidato.split('"')[0] for candidato in res5]                                # cut anything after a leftover quote
print(res6)
pagina = requests.get(url2)
texto = pagina.text
print(texto[0:1000])
soup = BeautifulSoup(pagina.text, "lxml")
print(soup.text[160:800])
links = soup.findAll('a')
lista_links = []
for link in links:
lista_links.append(link.get('href'))
print(lista_links)
lista_links = [link for link in lista_links if '/corpo-docente/' in link]
lista_links
requisicao = requests.get('https://emap.fgv.br/' + lista_links[1])
soup = BeautifulSoup(requisicao.text, "lxml")
email_do_professor = soup.select('a[href^=mailto]')
email_do_professor
###Output
_____no_output_____
###Markdown
Using regular expressions
###Code
text = """The E-Book looks amazing and I would like a copy of it, here is my e-mail id - [email protected] |
Hi, I am looking for a job in data science field, please send me the E-book and kindly suggest
how to move forward, thanks - [email protected]"""
re.findall(r"([\w.-]+@[\w.-]+)", text)
###Output
_____no_output_____
###Markdown
Removing Emojis from text
###Code
text= "Hi 😂! Have a nice weekend 💕👭"
preprocessed_text=text.encode('ascii', 'ignore').decode('ascii')
print("Raw tweet:",text) #with emoji
print("Preprocessed tweet:",preprocessed_text) # no emoji
###Output
Raw tweet: Hi 😂! Have a nice weekend 💕👭
Preprocessed tweet: Hi ! Have a nice weekend
|
03_read_data.ipynb | ###Markdown
Reading the data> This notebook covers reading the Reddit data. Variables
###Code
SUBREDDIT = 'askreddit'
LIMIT = 100_000
YEARS = [year for year in range(2006, 2021)]
YEAR = 2010
###Output
_____no_output_____
###Markdown
Imports
###Code
# export
from glob import glob
import pandas as pd
from pathlib import Path
import os
###Output
_____no_output_____
###Markdown
`Google Cloud Storage` authentication
###Code
CREDS = f'{os.getcwd()}/google-drive-d2e64a7dbc90.json'
%env GOOGLE_APPLICATION_CREDENTIALS=$CREDS
###Output
_____no_output_____
###Markdown
Get file paths per lexeme
###Code
#export
def get_fpaths_lex(LEX, CORPUS_DIR='data/', source='local', bucket_name='socemb'):
if source == 'remote':
client = storage.Client()
blobs = [blob for blob in client.list_blobs(bucket_name, prefix=f'comments/{LEX}')]
fpaths = [f'gs://{bucket_name}/{blob.name}' for blob in blobs]
if source == 'local':
lex_path = f'{CORPUS_DIR}{LEX}' + "/*.csv"
fpaths = glob(lex_path)
return fpaths
fpaths_lex = get_fpaths_lex('Anglo-Saxon', source='local')
fpaths_lex
assert len(fpaths_lex) == 2
###Output
_____no_output_____
###Markdown
per subreddit (and year)
###Code
# export
def get_fpath_subr_yr(SUBREDDIT, LIMIT, YEAR):
return f'data/subreddit/{SUBREDDIT}/{LIMIT}_{YEAR}.csv'
fpath = get_fpath_subr_yr('askaconservative', 100_000, 2020)
fpath
os.path.exists(fpath)
get_fpath_subr_yr('askreddit', 100_000, 2009) == 'data/subreddit/askreddit/100000_2009.csv'
# export
def get_fpaths_subr_yrs(SUBREDDIT, LIMIT, YEARS):
fpaths = [get_fpath_subr_yr(SUBREDDIT, LIMIT, year) for year in YEARS]
return fpaths
assert len(get_fpaths_subr_yrs(SUBREDDIT, LIMIT, YEARS)) == len(YEARS)
###Output
_____no_output_____
###Markdown
per year
###Code
# export
def get_fpaths_yr(YEAR, DIR='data/subreddit/'):
fpaths = []
for fpath in Path(DIR).rglob(f'*{YEAR}.csv'):
fpaths.append(fpath)
return fpaths
get_fpaths_yr(2010)
###Output
_____no_output_____
###Markdown
Read comments Read `1` comments `csv` file
###Code
fpath = get_fpath_subr_yr('askreddit', 100_000, 2009)
# export
def read_comm_csv(fpath):
try:
date_parser = lambda x: pd.to_datetime(x, unit='s', errors='coerce')
comments = pd.read_csv(
fpath,
usecols=['id', 'created_utc', 'subreddit', 'body'],
dtype={
'id': 'string',
'created_utc': int,
'subreddit': 'string',
'body': 'string'
},
parse_dates=['created_utc'],
date_parser=date_parser,
low_memory=False,
lineterminator='\n'
)
comments_clean = comments\
.dropna()\
.drop_duplicates(subset='id')
return comments_clean
except FileNotFoundError:
print(f'{fpath} not found on disk')
except pd.errors.EmptyDataError:
print(f'{fpath} is empty')
comments = read_comm_csv(fpath)
comments.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 99999 entries, 0 to 99999
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 body 99999 non-null string
1 created_utc 99999 non-null datetime64[ns]
2 id 99999 non-null string
3 subreddit 99999 non-null string
dtypes: datetime64[ns](1), string(3)
memory usage: 3.8 MB
###Markdown
Read multiple comment `csv` files
###Code
# export
def read_comm_csvs(fpaths):
comments_lst = []
for fpath in fpaths:
comments = read_comm_csv(fpath)
comments_lst.append(comments)
comments_concat = pd.concat(
comments_lst,
axis=0,
ignore_index=True
)
return comments_concat
fpaths = get_fpaths_subr_yrs(SUBREDDIT, LIMIT, YEARS)
comments = read_comm_csvs(fpaths)
comments.value_counts('subreddit')
assert comments.shape == (1400, 4)
###Output
_____no_output_____
###Markdown
Parse dates
###Code
# export
def parse_dates(comments):
comments['created_utc'] = pd.to_datetime(comments['created_utc'], errors='coerce')
comments.sort_values('created_utc', inplace=True)
comments.dropna(subset=['created_utc'], inplace=True)
return comments
comments = parse_dates(comments)
comments.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 1400 entries, 99 to 1300
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 body 1400 non-null object
1 created_utc 1400 non-null datetime64[ns]
2 id 1400 non-null object
3 subreddit 1400 non-null object
dtypes: datetime64[ns](1), object(3)
memory usage: 54.7+ KB
###Markdown
Export notebooks
###Code
# hide
from nbdev.export import notebook2script
notebook2script()
###Output
Converted 00_processing.ipynb.
Converted 01_installation.ipynb.
Converted 02_read_data.ipynb.
Converted 03_clean_data.ipynb.
Converted 04_usage_intensity.ipynb.
Converted index.ipynb.
|
examples/funcalign/FastSRM_encoding_experiment.ipynb | ###Markdown
Encoding experiment using the fast shared response model (FastSRM) In this notebook we introduce some basic functionalities of FastSRM and compare its performance to another implementation of SRM (ProbSRM). We present an encoding experiment that shows how fMRI data of train subjects can be used to predict fMRI data of test subjects (after training). More precisely, let us assume we have 2 groups of subjects (train, test) exposed to 2 similar but different naturalistic stimuli (session 1 and session 2) while we record their brain activity using an fMRI scanner. Our experiment has the following steps:- Align train subjects: We train an alignment model on session 1 using train subjects- Align test subjects: Using data of test subjects during session 1 and the previously fitted model, we add test subjects to the model- Predict test subjects' data from train subjects: We use the model to align train subjects during session 2. From the aligned data (shared response) we predict the data of test subjects during session 2.- Measure performance: We report the R2 score between predicted and actual data. Real fMRI data We'll download a publicly available fMRI dataset and run SRM on these data. This dataset comprises fMRI data for 20 subjects listening to the spoken story Pie Man by Jim O'Grady (archived on the Princeton DataSpace). Note that we use 20 subjects to minimize computational demands for this tutorial and recommend larger sample sizes for publication. The gzipped data archive file is ~1.5 GB in size, and may take a couple minutes to download and unzip. The functional data were acquired with 3 x 3 x 4 mm voxels and 1.5 s TRs. Data were preprocessed using fMRIPrep (Esteban et al., 2018), including spatial normalization to MNI space (the T1-weighted ICBM 2009c Nonlinear Asymmetric template). The data were then smoothed to 6 mm FWHM using AFNI's 3dBlurToFWHM (Cox, 1996). The following confound variables were regressed out using 3dTproject: six head motion parameters (and their first derivatives), framewise displacement, six principal components from an anatomical mask of cerebrospinal fluid (CSF) and white matter, sine/cosine bases for high-pass filtering (cutoff: 0.00714 Hz; 140 s), as well as linear and quadratic trends. The anatomical template and a brain mask (i.e., excluding skull) are supplied as well. These have been resampled to match the resolution of the functional images.
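Before downloading the data, note how performance will be measured at the end of the notebook. Since each voxel's timecourse is standardized to zero mean and unit variance by the masker, the per-voxel R2 reduces to one minus the variance of the residual. A minimal sketch of that computation (arrays assumed to have shape [n_voxels, n_timeframes]):

```python
import numpy as np

def r2_per_voxel(predicted, actual):
    # with unit-variance timecourses, R2 = 1 - Var(residual) / Var(actual) = 1 - Var(residual)
    residual = predicted - actual
    return 1 - residual.var(axis=1)
```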
###Code
import wget
from time import time
from glob import glob
from os.path import join
import nibabel
from nilearn.image import new_img_like
from nilearn.input_data import NiftiMasker, MultiNiftiMasker
import numpy as np
from joblib import Parallel, delayed
from nilearn.plotting import plot_stat_map
import matplotlib.pyplot as plt
from IPython.display import clear_output
import tarfile
# Download data tarball from Princeton DataSpace (about 1 Gb to download)
t0 = time()
def update_progress(current, total, width=0):
bar_length = 80
progress = current / total
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
if progress < 0:
progress = 0
if progress >= 1:
progress = 1
block = int(round(bar_length * progress))
clear_output(wait = True)
text = "Progress: [{0}] {1:.1f}%".format( "#" * block + "-" * (bar_length - block), progress * 100)
print(text)
wget.download('https://dataspace.princeton.edu/jspui/bitstream/88435/dsp01dz010s83s/6/pieman-isc-tutorial.tgz',
'pieman_isc', bar=update_progress)
tar = tarfile.open("pieman_isc", "r:gz")
tar.extractall()
tar.close()
print("Done in %.2f seconds" % (time() - t0))
###Output
Done in 801.00 seconds
###Markdown
Step 1: Mask and save the data- We split our data into two sessions (in order to be able to perform our encoding experiment)- We mask the data and save them into .npy files Note: We use ``detrend=True`` and ``standardize=True`` in the ``NiftiMasker``. This is standard fMRI preprocessing and is needed for FastSRM to work.
###Code
t0 = time()
# the directory where our data are located
data_dir = 'pieman-isc-tutorial'
# Filenames for MRI data; gzipped NIfTI images (.nii.gz)
func_fns = glob(join(data_dir, ('sub-*_task-pieman_space-MNI152NLin2009cAsym'
'_desc-tproject_bold.nii.gz')))
# The mask for our data
mask_fn = join(data_dir, 'MNI152NLin2009cAsym_desc-brain_mask.nii.gz')
# Let us mask these data and separate them into two sessions
def separate_and_mask(func):
# Load data
N = nibabel.load(func).get_data()
# Separate them into two sessions
N_1 = N[:, :, :, :250]
N_2 = N[:, :, :, 250:]
I_1 = new_img_like(func, N_1)
I_2 = new_img_like(func, N_2)
# Mask data
masker = NiftiMasker(
mask_img=mask_fn,
detrend=True,
standardize=True,
smoothing_fwhm=6
).fit()
# Transpose the data to fit with SRM conventions
X_1 = masker.transform(I_1).T
X_2 = masker.transform(I_2).T
# Save data
np.save(func[:-7] + "_session_1", X_1)
np.save(func[:-7] + "_session_2", X_2)
# I have 4 cores in my computer, it you have more increase n_jobs
Parallel(n_jobs=4, verbose=10)(
delayed(separate_and_mask)(
func
) for func in func_fns)
print("Done in %.2f seconds" % (time() - t0))
###Output
[Parallel(n_jobs=4)]: Using backend LokyBackend with 4 concurrent workers.
[Parallel(n_jobs=4)]: Done 5 tasks | elapsed: 15.1s
[Parallel(n_jobs=4)]: Done 10 tasks | elapsed: 22.4s
[Parallel(n_jobs=4)]: Done 16 out of 20 | elapsed: 29.8s remaining: 7.5s
###Markdown
Step 2: mask and save an atlas- Atlases are used in FastSRM to make computation faster- Any off-the-shelf big atlas should work (number of regions of the atlas should be larger than number of components used in SRM) we use Basc 444 for our example
###Code
def load_atlas(atlas, mask_img):
# Load masker
atlas_masker = MultiNiftiMasker(
mask_img=mask_img).fit()
X = nibabel.load(atlas).get_data()
# If the atlas is a deterministic atlas
# (each region is identified by a number starting from 1)
if len(X.shape) == 3:
n_components = len(np.unique(X)) - 1
xa, ya, za = X.shape
A = np.zeros((xa, ya, za, n_components + 1))
for c in np.unique(X)[1:].astype(int):
X_ = np.copy(X)
X_[X_ != c] = 0.
X_[X_ == c] = 1.
A[:, :, :, c] = X_
A = atlas_masker.transform(new_img_like(atlas, A))
A = np.argmax(A, axis=0)
# If the atlas is a probabilistic atlas
# (each region is assigned to a component)
else:
A = atlas_masker.transform(atlas)
return A
t0 = time()
from nilearn.datasets import fetch_atlas_basc_multiscale_2015
atlas = fetch_atlas_basc_multiscale_2015(data_dir=data_dir)['scale444']
print(atlas)
A = load_atlas(atlas, mask_fn)
np.save(atlas[:-7], A)
atlas_path = atlas[:-7] + ".npy"
print("Done in %.2f" % (time() - t0))
###Output
Dataset created in pieman-isc-tutorial/basc_multiscale_2015
Downloading data from https://ndownloader.figshare.com/files/1861819 ...
###Markdown
Step 3: Fit the model and predict data of left-out subjects- Load data- Train model on first session using train subjects- Compute shared response on second session using train subjects- Compute alignment for test subjects using session 1- Predict data of test subjects during session 2 using the trained model Note about input images ProbSRM/DetSRM possible input- imgs is a list of arrays where element i of the list is a numpy array of shape [n_voxels, n_timeframes] that contains the data of subject i FastSRM possible input- imgs is a list of arrays where element i of the list is a numpy array of shape [n_voxels, n_timeframes] that contains the data of subject i- imgs is a list of lists of arrays where element i, j is a numpy array of shape [n_voxels, n_timeframes] that contains the data of subject i collected during session j.- imgs is an np array of paths, where imgs[i, j] is a path to the data of subject i collected during session j. Data are loaded with numpy.load and the expected shape is [n_voxels, n_timeframes]. n_timeframes and n_voxels are assumed to be the same across subjects; n_timeframes can vary across sessions. Each voxel's timecourse is assumed to have mean 0 and variance 1. FastSRM can therefore be used with very large datasets (even those whose data cannot be held in memory).
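A hedged illustration of these three layouts with made-up shapes and file names (the .npy paths below are hypothetical, just to show the expected structure):

```python
import numpy as np

n_subjects, n_voxels = 3, 1000

# 1) list of arrays: one [n_voxels, n_timeframes] array per subject (single session)
imgs_single_session = [np.random.randn(n_voxels, 200) for _ in range(n_subjects)]

# 2) list of lists of arrays: imgs[i][j] holds subject i, session j
#    (n_timeframes may differ across sessions but not across subjects)
imgs_multi_session = [[np.random.randn(n_voxels, 100),
                       np.random.randn(n_voxels, 150)] for _ in range(n_subjects)]

# 3) array of paths: imgs[i, j] is a .npy file of shape [n_voxels, n_timeframes],
#    loaded lazily with np.load, which keeps memory usage low
imgs_paths = np.array([["sub-01_sess-1.npy", "sub-01_sess-2.npy"],
                       ["sub-02_sess-1.npy", "sub-02_sess-2.npy"]])
```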
###Code
subjects = [18, 19, 20, 21, 24, 25, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 40, 42]
sessions = [1, 2]
files = np.array([
[glob(join(data_dir, "sub-%.3i*_session_%i*" %(sub, sess)))[0]
for sess in sessions]
for sub in subjects])
# 20 subjects x 2 sessions file matrix
print("Input shape")
print(files.shape)
def load_and_concat(paths):
"""
    Take an array (n_subjects, n_sessions) of file paths and return a list of arrays
    Parameters
    ----------
    paths : np.ndarray of str, shape (n_subjects, n_sessions)
        paths[i, j] is the path to the masked data of subject i during session j
    Returns
    -------
    X : list of np.ndarray
        one array of shape (n_voxels, n_timeframes) per subject, with sessions concatenated in time
"""
X = []
for i in range(len(paths)):
X_i = np.concatenate([np.load(paths[i, j])
for j in range(len(paths[i]))], axis=1)
X.append(X_i)
return X
from brainiak.funcalign.fastsrm import FastSRM
from brainiak.funcalign.srm import SRM
fastsrm = FastSRM(
atlas=atlas_path, # the path to basc atlas (we could have used np.load(atlas_path) instead)
n_components=20,
    n_jobs=1,  # since we use a small dataset, parallelization is counter-productive, so we do not use it here
n_iter=10,
temp_dir=data_dir, # We will use the disk as if we had a small memory
low_ram=True, # Let's say I really have a small memory so I need low_ram mode
aggregate="mean" # transform will return the mean of subject specific shared response
)
probsrm = SRM(
n_iter=10, # same number of iterations
features=20 # same number of components
)
models = [("probsrm", probsrm), ("fastsrm", fastsrm)]
from sklearn.model_selection import KFold
# List in which we record for each algo the R2 scores per voxels averaged across subjects
r2_mean = {}
for name, model in models:
print("Running reconstruction experiment for %s" % name)
t0 = time()
# List in which we record for each subject the test R2 scores per voxels
r2_subjects = []
# We divide all subjects into train subjects and test subjects
for subjects_train, subjects_test in KFold(n_splits=5,
shuffle=True
).split(np.arange(len(subjects))):
# First let us train the model on train subjects during session 1
# # For this we will use an input format that is supported by both FastSRM and SRM: a list of arrays
train_subjects_session_1 = load_and_concat(files[subjects_train, :][:, :1])
train_subjects_session_2 = load_and_concat(files[subjects_train, :][:, 1:])
test_subjects_session_1 = load_and_concat(files[subjects_test, :][:, :1])
test_subjects_session_2 = load_and_concat(files[subjects_test, :][:, 1:])
n_subjects_train = len(subjects_train)
n_subjects_test = len(subjects_test)
# # Let us fit the model on the first session
model.fit(train_subjects_session_1)
# Then let us compute the shared response on the second session
# # With ProbSRM the transform method returns a list of subject-specific responses in shared space
# # so we need an additional step to aggregate them
if name == "probsrm":
shared_session_2 = model.transform(train_subjects_session_2)
shared_session_2 = np.mean(shared_session_2, axis=0)
# # With FastSRM we can specify the desired behavior.
# # Because we specified aggregate="mean" transform directly returns
# # the mean of subject-specific responses (aggregate=None would result
# # in the same behavior as in ProbSRM)
if name == "fastsrm":
shared_session_2 = model.transform(train_subjects_session_2)
# Now we add test subjects to the model
# # With ProbSRM we have a function transform subject that returns the basis for
# # one specific subject. We will save this in a list
if name == "probsrm":
list_basis_test_subjects = [model.transform_subject(x) for x in test_subjects_session_1]
# # With FastSRM we have a function add subject that takes a list of subjects
# # new subjects are added to internal basis_list (that can be accessed using .basis_list
# # but this is usually not necessary)
# # With FastSRM we need to specify what is the shared response that is used to learn the alignment
if name == "fastsrm":
shared_session_1 = model.transform(train_subjects_session_1)
model.add_subjects(test_subjects_session_1, shared_session_1)
# Then we try to reconstruct the data of test subjects during session 2
# # ProbSRM does not provide an inverse transform so we need to implement this
# # (it is rather easy)
if name == "probsrm":
reconstructed_data_test_subjects_session_2 = [
list_basis_test_subjects[i].dot(shared_session_2)
for i in range(n_subjects_test)]
# # FastSRM provides an inverse transform but we need to specify what to reconstruct
# # New subjects are added at the end of the list so we need to reconstruct the data of the last
# # n_subjects_test subjects
if name == "fastsrm":
reconstructed_data_test_subjects_session_2 = model.inverse_transform(
shared_session_2,
subjects_indexes=np.arange(n_subjects_train, n_subjects_train + n_subjects_test))
# This is the true data we are trying to reconstruct
real_data_test_subjects_session_2 = np.array([np.load(file) for file in files[subjects_test, :][:, 1]])
for i in range(n_subjects_test):
diff = reconstructed_data_test_subjects_session_2[i] - real_data_test_subjects_session_2[i]
r2 = 1 - diff.var(axis=1)
r2_subjects.append(r2)
r2_mean[name] = np.mean(r2_subjects, axis=0)
print("Done in %.2f" % (time() - t0))
###Output
Running reconstruction experiment for probsrm
Done in 105.25
Running reconstruction experiment for fastsrm
Done in 48.39
###Markdown
Step 5: Plot results
###Code
masker = NiftiMasker(
mask_img=mask_fn).fit()
for name in ["probsrm", "fastsrm"]:
# R2 score in a ROI given by areas where ProbSRM performs well
print("R2 score %s: %.3f" % (name, np.mean(r2_mean[name][r2_mean["probsrm"] > 0.01])))
plot_stat_map(
masker.inverse_transform(r2_mean[name]),
display_mode="z",
cut_coords=[0, 5, 10, 15, 20],
vmax=0.3,
title="R2 %s" % name
)
###Output
R2 score probsrm: 0.036
R2 score fastsrm: 0.039
|
nb/GeoPandas.ipynb | ###Markdown
GeoPandas [GeoPandas](http://geopandas.org/) is a free-software project that extends the data types of [Pandas](http://pandas.pydata.org/) to incorporate geometric objects (points, lines, polygons, etc.). GeoPandas relies on the libraries [Shapely](https://pypi.org/project/Shapely/) for geometric operations, [Fiona](https://github.com/Toblerity/Fiona) for data access (e.g. from files), and [Descartes](https://bitbucket.org/sgillies/descartes/src/default/) and [Matplotlib](https://matplotlib.org/) for plotting. The goal of GeoPandas is to make working with geospatial data in Python easier, which it achieves through structures that can handle large amounts of data at once. The two main GeoPandas structures are:- [GeoSeries](http://geopandas.org/data_structures.htmlgeoseries): a vector in which each element is a set of one or more geometries corresponding to one observation, for example the polygon (or multipolygon) that represents a province.- [GeoDataFrame](http://geopandas.org/data_structures.htmlgeodataframe): a tabular structure (i.e. with rows and columns) holding geometric and non-geometric data (e.g. text, numbers); its set of geometries is implemented as a GeoSeries. A short construction sketch is included at the end of the import cell below. With these structures it is possible to perform "bulk" data operations from Python that would otherwise require a geospatial database (e.g. [PostgreSQL/PostGIS](https://postgis.net/)). Installation To install the package with **conda**, run the following command from the Anaconda command line:```conda install geopandas``` Import
###Code
%matplotlib inline
import pandas as pd
import geopandas
from shapely.geometry import Point, Polygon
# Maximum number of records displayed for a GeoDataFrame
pd.options.display.max_rows = 10
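# --- Short sketch (hypothetical coordinates) of the two core structures described above ---
# A GeoSeries is a vector of geometries; a GeoDataFrame couples a GeoSeries with ordinary columns
example_gs = geopandas.GeoSeries([Point(-84.0, 9.9), Point(-83.7, 9.0)])
example_gdf = geopandas.GeoDataFrame({"name": ["point_a", "point_b"]}, geometry=example_gs)
example_gdf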
###Output
_____no_output_____
###Markdown
Examples The following examples use the countries _shapefile_ from [Natural Earth](https://www.naturalearthdata.com/), available at [http://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-admin-0-countries/](http://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-admin-0-countries/). That link provides a ZIP file that must be placed in the data directory (/datos). Once that is done, the data is loaded into a GeoDataFrame with the **read_file()** function.
###Code
paises = geopandas.read_file("zip://./datos/ne_110m_admin_0_countries.zip")
# if you unzipped the file, use this command instead:
# paises = geopandas.read_file("datos/ne_110m_admin_0_countries/ne_110m_admin_0_countries.shp")
# paises is a GeoDataFrame
type(paises)
###Output
_____no_output_____
###Markdown
Viewing tabular data The data in a GeoDataFrame can be inspected with the **head()** function, which returns the first records of the GeoDataFrame. Note the column holding the geometric data type.
###Code
paises.head()
# Display the geometries
paises.geometry
# paises.geometry is a GeoSeries
type(paises.geometry)
# Display the list of columns
paises.columns
# Display a subset of columns
paises[['NAME_ES','CONTINENT','ECONOMY']]
###Output
_____no_output_____
###Markdown
**Functions on columns**
###Code
# Mean
paises['POP_EST'].mean()
# Maximum
paises['POP_EST'].max()
# Minimum
paises['POP_EST'].min()
###Output
_____no_output_____
###Markdown
**Filtering**
###Code
paises[paises['CONTINENT'] == 'Africa']
paises[paises['POP_EST'] <= 100000]
###Output
_____no_output_____
###Markdown
Viewing geospatial data The [plot()](http://geopandas.org/reference.htmlgeopandas.GeoDataFrame.plot) function provides a simple way to display the data on a map.
###Code
paises.plot()
paises_asia = paises[paises['CONTINENT'] == 'Asia']
paises_asia.plot()
# Change the map size
paises_asia.plot(figsize=(15, 10))
###Output
_____no_output_____
###Markdown
**Colors**
###Code
paises_asia.plot(figsize=(15, 10), cmap="rainbow")
# Colors assigned based on a column
paises_asia.plot(figsize=(15, 10), cmap="YlOrRd", column="POP_EST")
###Output
_____no_output_____
###Markdown
For more color options, see [https://matplotlib.org/users/colormaps.html](https://matplotlib.org/users/colormaps.html). Displaying multiple layers For the following examples, download the following _shapefiles_ compressed in ZIP format:- **Cities**: [http://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-populated-places/](http://www.naturalearthdata.com/downloads/110m-cultural-vectors/110m-populated-places/)- **Rivers**: [http://www.naturalearthdata.com/downloads/50m-physical-vectors/50m-rivers-lake-centerlines/](http://www.naturalearthdata.com/downloads/50m-physical-vectors/50m-rivers-lake-centerlines/)Both layers must be copied into the data directory. Their contents are then loaded into two GeoDataFrames:
###Code
ciudades = geopandas.read_file("zip://./datos/ne_110m_populated_places.zip")
rios = geopandas.read_file("zip://./datos/ne_50m_rivers_lake_centerlines.zip")
###Output
_____no_output_____
###Markdown
A subset of the data is created for the African continent:
###Code
paises_africa = paises[paises['CONTINENT'] == 'Africa']
ax = paises.plot(edgecolor='black', facecolor='none', figsize=(15, 10))
rios.plot(ax=ax, color='blue')
ciudades.plot(ax=ax, color='red')
ax.set(xlim=(-20, 60), ylim=(-40, 40))
###Output
_____no_output_____
###Markdown
Exercise: Download from SNIT the layers for:* Provincial boundaries [(http://www.snitcr.go.cr/servicios_ogc_lista_capas?k=bm9kbzo6MjY=&nombre=IGN%20Cartograf%C3%ADa%201:5mil)](http://www.snitcr.go.cr/servicios_ogc_lista_capas?k=bm9kbzo6MjY=&nombre=IGN%20Cartograf%C3%ADa%201:5mil)* Aerodromes and road network [(http://www.snitcr.go.cr/servicios_ogc_lista_capas?k=bm9kbzo6MjY=&nombre=IGN%20Cartograf%C3%ADa%201:5mil)](http://www.snitcr.go.cr/servicios_ogc_lista_capas?k=bm9kbzo6MjY=&nombre=IGN%20Cartograf%C3%ADa%201:5mil)Display the three layers on a single map. **Geospatial data in text files** In these cases, a conventional DataFrame can be created from the text file and a set of geometries from the corresponding columns (e.g. longitude, latitude). A GeoDataFrame is then created by combining the DataFrame and the geometries. An example follows. To begin, download the occurrence data file for the bird genus [Trogon](https://en.wikipedia.org/wiki/Trogon_(genus)) in Costa Rica from [http://api.gbif.org/v1/occurrence/download/request/0021444-190621201848488.zip](http://api.gbif.org/v1/occurrence/download/request/0021444-190621201848488.zip) and unzip it.
###Code
# Load the data into a DataFrame
trogones_df = pd.read_csv("datos/0021444-190621201848488.csv", sep='\t')
trogones_df.head()
# Display a subset of columns
trogones_df[['species', 'decimalLongitude', 'decimalLatitude', 'eventDate']]
# Create a list of point geometries
puntos = [Point(xy) for xy in zip(trogones_df["decimalLongitude"], trogones_df["decimalLatitude"])]
puntos[:5]
# Combine the DataFrame and the geometries into a GeoDataFrame, together with a coordinate reference system
trogones=geopandas.GeoDataFrame(trogones_df, crs={"init": "epsg:4326"}, geometry=puntos)
trogones.head()
# Map of the trogon occurrence records
trogones.plot(figsize=(15, 10), color="red", markersize=5)
###Output
_____no_output_____
###Markdown
Exercise: Display the trogon occurrence records on top of the SNIT provinces layer.
###Code
provincias = geopandas.read_file("datos/cr_provincias_wgs84_snit_ign_2019.shp")
ax = provincias.plot(edgecolor='black', facecolor='none', figsize=(15, 10))
trogones.plot(ax=ax, color='red', markersize=5)
ax.set(xlim=(-86.5, -82), ylim=(8, 11.25))
###Output
_____no_output_____
###Markdown
Exercise: Run the following code snippet and study the functions it uses. It assumes that a GeoDataFrame called "provincias" exists, corresponding to the provinces layer.
###Code
dfsjoin = geopandas.sjoin(provincias,trogones)
dfpivot = pd.pivot_table(dfsjoin, index='nom_prov', columns='species', aggfunc={'species':len})
dfpivot.columns = dfpivot.columns.droplevel()
dfprovspecies = provincias.merge(dfpivot, how='left', on='nom_prov')
dfprovspecies
# Replace null values (NaN) with zero
dfprovspecies = dfprovspecies.fillna(0)
dfprovspecies
###Output
_____no_output_____
###Markdown
Exercise: Replace the null values only in the columns with species names (not in the whole dataframe). Exercise: Keep in the dataframe only the province-name column and the species-name columns. Exercise: Change the data type of the species-name columns to integer. Exercise: Based on the GeoDataFrame generated in the previous step, display a choropleth map reflecting the number of records of the species *Trogon massena* in each province. A possible sketch for these exercises is shown below, before the save step. **Saving the layer** This is done with the GeoPandas [to_file()](http://geopandas.org/reference.htmlgeopandas.GeoDataFrame.to_file) function.
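One possible sketch for the exercises above (not the only solution): it assumes the `dfprovspecies` GeoDataFrame from the previous cell, takes the species names from `dfpivot`, and keeps the `geometry` column so the choropleth can still be drawn.
```
# Fill NaN only in the species columns, keep province name + species (+ geometry),
# cast the counts to integer, and draw a choropleth for Trogon massena
species_cols = list(dfpivot.columns)
dfprovspecies[species_cols] = dfprovspecies[species_cols].fillna(0).astype(int)
dfprovspecies = dfprovspecies[['nom_prov', 'geometry'] + species_cols]
dfprovspecies.plot(column='Trogon massena', cmap='YlOrRd', legend=True, figsize=(15, 10))
```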
###Code
dfprovspecies.to_file("datos/provincias-trogones.shp")
###Output
_____no_output_____ |
001-Jupyter/001-Tutorials/002-IPython-Cookbook/chapter05_hpc/06_ray.ipynb | ###Markdown
5.6. Optimizing Cython code by writing less Python and more C
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
w, h = 400, 400 # Size of the screen in pixels.
def normalize(x):
# This function normalizes a vector.
x /= np.linalg.norm(x)
return x
def intersect_sphere(O, D, S, R):
# Return the distance from O to the intersection
# of the ray (O, D) with the sphere (S, R), or
# +inf if there is no intersection.
# O and S are 3D points, D (direction) is a
# normalized vector, R is a scalar.
a = np.dot(D, D)
OS = O - S
b = 2 * np.dot(D, OS)
c = np.dot(OS, OS) - R * R
disc = b * b - 4 * a * c
if disc > 0:
distSqrt = np.sqrt(disc)
q = (-b - distSqrt) / 2.0 if b < 0 \
else (-b + distSqrt) / 2.0
t0 = q / a
t1 = c / q
t0, t1 = min(t0, t1), max(t0, t1)
if t1 >= 0:
return t1 if t0 < 0 else t0
return np.inf
def trace_ray(O, D):
# Find first point of intersection with the scene.
t = intersect_sphere(O, D, position, radius)
# No intersection?
if t == np.inf:
return
# Find the point of intersection on the object.
M = O + D * t
N = normalize(M - position)
toL = normalize(L - M)
toO = normalize(O - M)
# Ambient light.
col = ambient
# Lambert shading (diffuse).
col += diffuse * max(np.dot(N, toL), 0) * color
# Blinn-Phong shading (specular).
col += specular_c * color_light * \
max(np.dot(N, normalize(toL + toO)), 0) \
** specular_k
return col
def run():
img = np.zeros((h, w, 3))
# Loop through all pixels.
for i, x in enumerate(np.linspace(-1, 1, w)):
for j, y in enumerate(np.linspace(-1, 1, h)):
# Position of the pixel.
Q[0], Q[1] = x, y
# Direction of the ray going through
# the optical center.
D = normalize(Q - O)
# Launch the ray and get the color
# of the pixel.
col = trace_ray(O, D)
if col is None:
continue
img[h - j - 1, i, :] = np.clip(col, 0, 1)
return img
# Sphere properties.
position = np.array([0., 0., 1.])
radius = 1.
color = np.array([0., 0., 1.])
diffuse = 1.
specular_c = 1.
specular_k = 50
# Light position and color.
L = np.array([5., 5., -10.])
color_light = np.ones(3)
ambient = .05
# Camera.
O = np.array([0., 0., -1.]) # Position.
Q = np.array([0., 0., 0.]) # Pointing to.
img = run()
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
ax.imshow(img)
ax.set_axis_off()
%timeit run()
###Output
_____no_output_____ |
jupyter-notebooks/TINC Graph Update.ipynb | ###Markdown
Graph update example
###Code
%pylab inline
from tinc import *
tclient = TincClient()
tclient.synchronize()
imageBuffer = tclient.get_disk_buffer("graph")
imageBuffer._path
import random
import matplotlib.pyplot as plt
import threading
def update_graph(parameter_value):
#print("Parameter value " + str(parameter_value))
data = [random.random() * parameter_value for i in range(10)]
with threading.Lock():
fname = imageBuffer.get_filename_for_writing()
f = plt.figure()
plt.title("Random numbers with range 0->" + str(parameter_value))
plt.plot(data)
#print("Update " + fname)
plt.savefig(fname)
plt.close()
f.clf()
del f
imageBuffer.done_writing_file(fname)
update_graph(1)
param = tclient.get_parameter("internalValuesDim")
param
param.register_callback(update_graph)
param.value = 0.1
pserver.monitor_server()
pserver.stop()
param.value
pwd
###Output
_____no_output_____ |
15 - Advanced Statistical Methods in Python/7_K-Means Clustering/6_How to Choose the Number of Clusters (6:11)/Selecting the number of clusters_with_comments.ipynb | ###Markdown
Basics of cluster analysis In this notebook we explore the issue of selecting the right number of clusters Import the relevant libraries
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Set the styles to Seaborn
sns.set()
# Import the KMeans module so we can perform k-means clustering with sklearn
from sklearn.cluster import KMeans
###Output
_____no_output_____
###Markdown
Load the data
###Code
# Load the country clusters data
data = pd.read_csv('3.01. Country clusters.csv')
# Check out the data manually
data
###Output
_____no_output_____
###Markdown
Map the data
###Code
# Create a copy of the original dataset
data_mapped = data.copy()
# Map languages with 0, 1, and 2. Note that this is not the best way to do that, but for now we will use it
data_mapped['Language']=data_mapped['Language'].map({'English':0,'French':1,'German':2})
# Check if we did it correctly
data_mapped
###Output
_____no_output_____
###Markdown
Select the features
###Code
# iloc is a method used to 'slice' data
# 'slice' is not technically correct as there are methods 'slice' which are a bit different
# The term used by pandas is 'selection by position'
# The first argument of identifies the rows we want to keep
# The second - the columns
# When choosing the columns, e.g. a:b, we will keep columns a,a+1,a+2,...,b-1 ; so column b is excluded
x = data_mapped.iloc[:,1:4]
# for this particular case, we are choosing columns 1, 2 and 3 (Latitude, Longitude and the mapped Language)
# Note column indices in Python start from 0
# Check if we worked correctly
x
###Output
_____no_output_____
###Markdown
Clustering
###Code
# Create an object (which we would call kmeans)
# The number in the brackets is K, or the number of clusters we are aiming for
kmeans = KMeans(2)
# Fit the input data, i.e. cluster the data in X in K clusters
kmeans.fit(x)
###Output
_____no_output_____
###Markdown
Clustering results
###Code
# Create a variable which will contain the predicted clusters for each observation
identified_clusters = kmeans.fit_predict(x)
# Check the result
identified_clusters
# Create a copy of the mapped data
data_with_clusters = data_mapped.copy()
# Create a new Series, containing the identified cluster for each observation
data_with_clusters['Cluster'] = identified_clusters
# Check the result
data_with_clusters
# Plot the data using the longitude and the latitude
# c (color) is an argument which could be coded with a variable
# The variable in this case has values 0,1,2, indicating to plt.scatter, that there are three colors (0,1,2)
# All points in cluster 0 will be the same colour, all points in cluster 1 - another one, etc.
# cmap is the color map. Rainbow is a nice one, but you can check others here: https://matplotlib.org/users/colormaps.html
plt.scatter(data_with_clusters['Longitude'],data_with_clusters['Latitude'],c=data_with_clusters['Cluster'],cmap='rainbow')
plt.xlim(-180,180)
plt.ylim(-90,90)
plt.show()
###Output
_____no_output_____
###Markdown
Selecting the number of clusters WCSS (within-cluster sum of squares)WCSS is a measure developed within the ANOVA framework. It summarizes how far the observations in each cluster are from their cluster centroid, so comparing it across solutions with different numbers of clusters gives us a rule for deciding on an appropriate number of clusters.
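For $k$ clusters with centroids $\mu_j$, WCSS is the total squared distance from each observation to the centroid of its assigned cluster:
$$ WCSS = \sum_{j=1}^{k} \sum_{x_i \in C_j} \lVert x_i - \mu_j \rVert^2 $$
This is exactly the quantity scikit-learn exposes as `kmeans.inertia_` in the cell below.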
###Code
# Get the WCSS for the current solution
kmeans.inertia_
# Create an empty list
wcss=[]
# Create all possible cluster solutions with a loop
for i in range(1,7):
# Cluster solution with i clusters
kmeans = KMeans(i)
# Fit the data
kmeans.fit(x)
# Find WCSS for the current iteration
wcss_iter = kmeans.inertia_
# Append the value to the WCSS list
wcss.append(wcss_iter)
# Let's see what we got
wcss
###Output
_____no_output_____
###Markdown
The Elbow Method
###Code
# Create a variable containing the numbers from 1 to 6, so we can use it as X axis of the future plot
number_clusters = range(1,7)
# Plot the number of clusters vs WCSS
plt.plot(number_clusters,wcss)
# Name your graph
plt.title('The Elbow Method')
# Name the x-axis
plt.xlabel('Number of clusters')
# Name the y-axis
plt.ylabel('Within-cluster Sum of Squares')
###Output
_____no_output_____ |
lab_3_mlops/1_sm_pipeline.ipynb | ###Markdown
Lab 3: MLOps with SageMaker Pipelines Prerequisites---This module assumes that you already know the basic concepts of SageMaker and SageMaker Pipelines. If you need an introduction to those concepts and a step-by-step hands-on, we recommend watching the sessions at the links below and then working through the hands-on material.- SageMaker Pipelines session (AWS Builders 300) - Part 1: https://www.youtube.com/watch?v=7IL_0-OjZWk - Part 2: https://www.youtube.com/watch?v=z_l2aNJswWQ- SageMaker Pipelines step-by-step hands-on - Introductory course: https://github.com/gonsoomoon-ml/SageMaker-Pipelines-Step-By-Step - (optionally) Advanced course 1: https://github.com/gonsoomoon-ml/SageMaker-Pipelines-Step-By-Step/tree/main/phase01 - (optionally) Advanced course 2: https://github.com/gonsoomoon-ml/SageMaker-Pipelines-Step-By-Step/tree/main/phase02 Introduction---In this module we build a simple machine learning pipeline with SageMaker Pipelines. SageMaker Pipelines has been updated continuously since its launch at re:Invent 2020; with the Lambda Step, a major feature added in August 2021, serverless tasks such as deploying a model to a hosted endpoint become easy to run. In addition, the caching feature lets you experiment quickly with only the changed parameters, without restarting the whole pipeline from scratch. See the links below for details on the Lambda Step and caching.Reference: - SageMaker Pipelines SDK: https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-sdk.html- Caching Pipeline Steps: https://docs.aws.amazon.com/sagemaker/latest/dg/pipelines-caching.html- AWS AIML Blog: Use a SageMaker Pipeline Lambda step for lightweight model deployments: https://aws.amazon.com/de/blogs/machine-learning/use-a-sagemaker-pipeline-lambda-step-for-lightweight-model-deployments/Note:- To run this notebook, the `AmazonSageMakerFullAccess` and `AmazonSageMakerPipelinesIntegrations` policies must be attached to your role.- For a quick hands-on, preprocessing and training use 1,000 sample records and 1 epoch. Because the model being trained has already been fine-tuned, it still shows high accuracy.
###Code
import boto3
import os
import numpy as np
import sagemaker
import sys
import time
import sagemaker
import sagemaker.huggingface
from sagemaker.huggingface import HuggingFace, HuggingFaceModel
from sagemaker.workflow.parameters import ParameterInteger, ParameterFloat, ParameterString
from sagemaker.lambda_helper import Lambda
from sagemaker.sklearn.processing import SKLearnProcessor
from sagemaker.huggingface.processing import HuggingFaceProcessor
from sagemaker.processing import ProcessingInput, ProcessingOutput
from sagemaker.workflow.steps import CacheConfig, ProcessingStep
from sagemaker.inputs import TrainingInput
from sagemaker.workflow.steps import TrainingStep
from sagemaker.processing import ScriptProcessor
from sagemaker.workflow.properties import PropertyFile
from sagemaker.workflow.step_collections import CreateModelStep, RegisterModel
from sagemaker.workflow.conditions import ConditionLessThanOrEqualTo,ConditionGreaterThanOrEqualTo
from sagemaker.workflow.condition_step import ConditionStep
from sagemaker.workflow.functions import JsonGet
from sagemaker.workflow.pipeline import Pipeline, PipelineExperimentConfig
from sagemaker.workflow.execution_variables import ExecutionVariables
sess = sagemaker.Session()
region = sess.boto_region_name
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it not exists
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
sagemaker_session = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker bucket: {sagemaker_session.default_bucket()}")
print(f"sagemaker session region: {sagemaker_session.boto_region_name}")
###Output
_____no_output_____
###Markdown
1. Defining the Pipeline--- 1.1. Pipeline parameters Define the basic pipeline parameters. See the link below for details.References: - Developer Guide: https://docs.aws.amazon.com/sagemaker/latest/dg/build-and-manage-parameters.html
###Code
# S3 prefix where every assets will be stored
s3_prefix = "hf-kornlp-mlops-demo"
# s3 bucket used for storing assets and artifacts
bucket = sagemaker_session.default_bucket()
# aws region used
region = sagemaker_session.boto_region_name
# base name prefix for sagemaker jobs (training, processing, inference)
base_job_prefix = s3_prefix
# Cache configuration for workflow
cache_config = CacheConfig(enable_caching=True, expire_after="7d")
# package versions
transformers_version = "4.11.0"
pytorch_version = "1.9.0"
py_version = "py38"
model_id_ = "daekeun-ml/koelectra-small-v3-nsmc"
tokenizer_id_ = "daekeun-ml/koelectra-small-v3-nsmc"
dataset_name_ = "nsmc"
model_id = ParameterString(name="ModelId", default_value=model_id_)
tokenizer_id = ParameterString(name="TokenizerId", default_value=tokenizer_id_)
dataset_name = ParameterString(name="DatasetName", default_value=dataset_name_)
###Output
_____no_output_____
###Markdown
1.2. Processing Step Define the preprocessing step with the built-in `SKLearnProcessor`. PyTorch, TensorFlow, MXNet, XGBoost, and Hugging Face have recently started to be supported as built-in processors as well. See the code snippet below for an example of using `HuggingFaceProcessor`. Note, however, that as of January 2022 `HuggingFaceProcessor` only supports GPU instances, so if you do not need GPU resources we recommend using `SKLearnProcessor`.```pythonfrom sagemaker.huggingface.processing import HuggingFaceProcessorhf_processor = HuggingFaceProcessor( instance_type=processing_instance_type, instance_count=processing_instance_count, pytorch_version=pytorch_version, transformers_version=transformers_version, py_version=py_version, base_job_name=base_job_prefix + "-preprocessing", sagemaker_session=sagemaker_session, role=role)```References: - AWS AIML Blog: https://aws.amazon.com/ko/blogs/machine-learning/use-deep-learning-frameworks-natively-in-amazon-sagemaker-processing/- Developer Guide: https://docs.aws.amazon.com/ko_kr/sagemaker/latest/dg/build-and-manage-steps.htmlstep-type-processing
###Code
processing_instance_type = ParameterString(name="ProcessingInstanceType", default_value="ml.c5.xlarge")
processing_instance_count = ParameterInteger(name="ProcessingInstanceCount", default_value=1)
processing_script = ParameterString(name="ProcessingScript", default_value="./src/processing_sklearn.py")
processing_output_destination = f"s3://{bucket}/{s3_prefix}/data"
sklearn_processor = SKLearnProcessor(
instance_type=processing_instance_type,
instance_count=processing_instance_count,
framework_version="0.23-1",
base_job_name=base_job_prefix + "-preprocessing",
sagemaker_session=sagemaker_session,
role=role
)
step_process = ProcessingStep(
name="ProcessDataForTraining",
cache_config=cache_config,
processor=sklearn_processor,
job_arguments=["--model_id", model_id_,
"--tokenizer_id", tokenizer_id_,
"--dataset_name", dataset_name_,
"--transformers_version", transformers_version,
"--pytorch_version", pytorch_version
],
outputs=[
ProcessingOutput(
output_name="train",
destination=f"{processing_output_destination}/train",
source="/opt/ml/processing/train",
),
ProcessingOutput(
output_name="validation",
destination=f"{processing_output_destination}/validation",
source="/opt/ml/processing/validation",
),
ProcessingOutput(
output_name="test",
destination=f"{processing_output_destination}/test",
source="/opt/ml/processing/test",
)
],
code=processing_script
)
###Output
_____no_output_____
###Markdown
1.3. Model Training Step Define the training step by reusing the training script from the previous lab as-is. To use it with SageMaker Pipelines, we also define workflow parameters (`ParameterInteger, ParameterFloat, ParameterString`). The S3 paths for the training, validation, and test data are not specified manually as in the previous lab; because the steps are chained together, they must be specified by referencing the properties (`properties`) of the preprocessing step result (`step_process`), as in the example below.```python"train": TrainingInput( s3_data=step_process.properties.ProcessingOutputConfig.Outputs["train"].S3Output.S3Uri)``` Training Parameter
###Code
# training step parameters
training_entry_point = ParameterString(name="TrainingEntryPoint", default_value="train.py")
training_source_dir = ParameterString(name="TrainingSourceDir", default_value="./src")
training_instance_type = ParameterString(name="TrainingInstanceType", default_value="ml.p3.2xlarge")
training_instance_count = ParameterInteger(name="TrainingInstanceCount", default_value=1)
# hyperparameters, which are passed into the training job
n_gpus = ParameterString(name="NumGPUs", default_value="1")
epochs = ParameterString(name="Epochs", default_value="1")
seed = ParameterString(name="Seed", default_value="42")
train_batch_size = ParameterString(name="TrainBatchSize", default_value="32")
eval_batch_size = ParameterString(name="EvalBatchSize", default_value="64")
learning_rate = ParameterString(name="LearningRate", default_value="5e-5")
# model_id = ParameterString(name="ModelId", default_value=model_id_)
# tokenizer_id = ParameterString(name="TokenizerId", default_value=tokenizer_id_)
# dataset_name = ParameterString(name="DatasetName", default_value=dataset_name_)
hyperparameters = {
'n_gpus': n_gpus, # number of GPUs per instance
'epochs': epochs, # number of training epochs
'seed': seed, # seed
'train_batch_size': train_batch_size, # batch size for training
'eval_batch_size': eval_batch_size, # batch size for evaluation
'warmup_steps': 0, # warmup steps
'learning_rate': learning_rate, # learning rate used during training
'tokenizer_id': tokenizer_id, # pre-trained tokenizer
'model_id': model_id # pre-trained model
}
chkpt_s3_path = f's3://{bucket}/{s3_prefix}/sm-processing/checkpoints'
huggingface_estimator = HuggingFace(
entry_point=training_entry_point,
source_dir=training_source_dir,
base_job_name=base_job_prefix + "-training",
instance_type=training_instance_type,
instance_count=training_instance_count,
role=role,
transformers_version=transformers_version,
pytorch_version=pytorch_version,
py_version=py_version,
hyperparameters=hyperparameters,
sagemaker_session=sagemaker_session,
disable_profiler=True,
debugger_hook_config=False,
checkpoint_s3_uri=chkpt_s3_path,
checkpoint_local_path='/opt/ml/checkpoints'
)
step_train = TrainingStep(
name="TrainHuggingFaceModel",
estimator=huggingface_estimator,
inputs={
"train": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"train"
].S3Output.S3Uri
),
"test": TrainingInput(
s3_data=step_process.properties.ProcessingOutputConfig.Outputs[
"test"
].S3Output.S3Uri
),
},
cache_config=cache_config,
)
###Output
_____no_output_____
###Markdown
1.4. Model evaluation Step Define an additional `ProcessingStep` to evaluate the performance of the trained model. Depending on the evaluation result, the model is created, registered, and deployed, or the pipeline stops. The evaluation result is copied into a `PropertyFile`, which is used later by the `ConditionStep`. Evaluation Parameter
###Code
evaluation_script = ParameterString(name="EvaluationScript", default_value="./src/evaluate.py")
evaluation_instance_type = ParameterString(name="EvaluationInstanceType", default_value="ml.m5.xlarge")
evaluation_instance_count = ParameterInteger(name="EvaluationInstanceCount", default_value=1)
###Output
_____no_output_____
###Markdown
Evaluator
###Code
!pygmentize ./src/evaluate.py
script_eval = SKLearnProcessor(
framework_version="0.23-1",
instance_type=evaluation_instance_type,
instance_count=evaluation_instance_count,
base_job_name=base_job_prefix + "-evaluation",
sagemaker_session=sagemaker_session,
role=role,
)
evaluation_report = PropertyFile(
name="HuggingFaceEvaluationReport",
output_name="evaluation",
path="evaluation.json",
)
step_eval = ProcessingStep(
name="HuggingfaceEvalLoss",
processor=script_eval,
inputs=[
ProcessingInput(
source=step_train.properties.ModelArtifacts.S3ModelArtifacts,
destination="/opt/ml/processing/model",
)
],
outputs=[
ProcessingOutput(
output_name="evaluation",
source="/opt/ml/processing/evaluation",
destination=f"s3://{bucket}/{s3_prefix}/evaluation_report",
),
],
code=evaluation_script,
property_files=[evaluation_report],
cache_config=cache_config,
)
###Output
_____no_output_____
###Markdown
1.5. Register the model The trained model is registered in the Model Registry of a Model Package Group. The Model Registry is a concept introduced with SageMaker Pipelines; unlike plain SageMaker models, it supports model versioning and lets you set an approval status. Model approval can be allowed only when the `ConditionStep` condition is satisfied (e.g., deploy the model only if accuracy is at least 80%).
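As a side note (a sketch, not part of this lab's pipeline code), once versions exist in the registry they can be inspected with the standard boto3 SageMaker client; the package group name below is the one defined in the next cell:
```python
import boto3

# List the versions registered in the model package group and their approval status
sm_client = boto3.client("sagemaker")
response = sm_client.list_model_packages(ModelPackageGroupName="HuggingFaceModelPackageGroup")
for package in response["ModelPackageSummaryList"]:
    print(package["ModelPackageVersion"], package["ModelApprovalStatus"])
```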
###Code
model = HuggingFaceModel(
model_data=step_train.properties.ModelArtifacts.S3ModelArtifacts,
role=role,
transformers_version=transformers_version,
pytorch_version=pytorch_version,
py_version=py_version,
sagemaker_session=sagemaker_session,
)
model_package_group_name = "HuggingFaceModelPackageGroup"
step_register = RegisterModel(
name="HuggingFaceRegisterModel",
model=model,
content_types=["application/json"],
response_types=["application/json"],
inference_instances=["ml.m5.xlarge", "ml.g4dn.xlarge"],
transform_instances=["ml.m5.xlarge", "ml.g4dn.xlarge"],
model_package_group_name=model_package_group_name,
approval_status="Approved",
)
###Output
_____no_output_____
###Markdown
1.6. Model Deployment Create `ModelDeployment`, a custom step derived from `LambdaStep`. The Lambda function defined in the LambdaStep deploys a hosted real-time endpoint.
###Code
!pygmentize utils/deploy_step.py
# custom Helper Step for ModelDeployment
from utils.deploy_step import ModelDeployment
# we will use the iam role from the notebook session for the created endpoint
# this role will be attached to our endpoint and need permissions, e.g. to download assets from s3
sagemaker_endpoint_role=sagemaker.get_execution_role()
model_name = f"{model_id_.split('/')[-1]}-{time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())}"
step_deployment = ModelDeployment(
model_name=model_name,
registered_model=step_register.steps[0],
endpoint_instance_type="ml.m5.xlarge",
sagemaker_endpoint_role=sagemaker_endpoint_role,
autoscaling_policy=None,
)
###Output
_____no_output_____
###Markdown
1.7. Condition for deployment Use a `ConditionStep` to inspect the model evaluation result. Only when the accuracy is above the threshold (accuracy > 0.8) does the pipeline proceed with model registration and deployment. Condition Parameter
###Code
threshold_accuracy = ParameterFloat(name="ThresholdAccuracy", default_value=0.8)
###Output
_____no_output_____
###Markdown
Condition
###Code
cond_gte = ConditionGreaterThanOrEqualTo(
left=JsonGet(
step_name=step_eval.name,
property_file=evaluation_report,
json_path="eval_accuracy",
),
right=threshold_accuracy,
)
step_cond = ConditionStep(
name="CheckHuggingfaceEvalAccuracy",
conditions=[cond_gte],
if_steps=[step_register, step_deployment],
else_steps=[],
)
###Output
_____no_output_____
###Markdown
2. Pipeline definition and execution---Once all steps are defined, define the pipeline. A pipeline instance consists of a name (`name`), parameters (`parameters`), and steps (`steps`). - Pipeline name: must be unique within an (AWS account, region) pair. - Parameters: every parameter used in the step definitions must also be defined in the pipeline. - Steps: the previously defined steps are passed as a list. Internally the relationships between steps are defined as a DAG using data dependencies, so they do not need to be listed in execution order.
###Code
pipeline = Pipeline(
name=f"HuggingFaceDemoPipeline",
parameters=[
model_id,
tokenizer_id,
dataset_name,
processing_instance_type,
processing_instance_count,
processing_script,
training_entry_point,
training_source_dir,
training_instance_type,
training_instance_count,
evaluation_script,
evaluation_instance_type,
evaluation_instance_count,
threshold_accuracy,
n_gpus,
epochs,
seed,
eval_batch_size,
train_batch_size,
learning_rate,
],
steps=[step_process, step_train, step_eval, step_cond],
sagemaker_session=sagemaker_session,
)
###Output
_____no_output_____
###Markdown
Check the pipeline definition
###Code
import json
definition = json.loads(pipeline.definition())
definition
pipeline.upsert(role_arn=role)
###Output
_____no_output_____
###Markdown
Run the pipeline Start the pipeline execution.
###Code
execution = pipeline.start()
execution.describe()
###Output
_____no_output_____
###Markdown
Wait until the pipeline execution finishes. You can also follow the progress in the SageMaker Studio console.
###Code
execution.wait()
###Output
_____no_output_____
###Markdown
List the executed steps.
###Code
execution.list_steps()
###Output
_____no_output_____
###Markdown
3. Getting predictions from the endpoint---If every stage of the pipeline ran successfully, you can run real-time inference against the deployed endpoint.
###Code
from sagemaker.huggingface import HuggingFacePredictor
endpoint_name = model_name
# check if endpoint is up and running
print(f"https://console.aws.amazon.com/sagemaker/home?region={region}#/endpoints/{endpoint_name}")
hf_predictor = HuggingFacePredictor(endpoint_name,sagemaker_session=sagemaker_session)
# example request, you always need to define "inputs"
data = {
"inputs": [
"정말 재미있습니다. 세 번 봐도 질리지 않아요.",
"시간이 아깝습니다. 다른 영화를 보세요."
]
}
hf_predictor.predict(data)
data = {
"inputs": [
"10점 만점에 1점만 줄께요.",
"내용이 너무 아른거려서 잠을 이룰 수가 없었어요. 감동의 향연!",
"액션광이기에 내용을 기대했지만 앙꼬없는 찐빵이다"
]
}
hf_predictor.predict(data)
###Output
_____no_output_____
###Markdown
Clean up---Delete unused resources to avoid unnecessary charges. The code cell below deletes the Lambda function and the endpoint.
###Code
sm_client = boto3.client("sagemaker")
# Delete the Lambda function
step_deployment.func.delete()
# Delete the endpoint
hf_predictor.delete_endpoint()
###Output
_____no_output_____ |
titanic/using_fastai.ipynb | ###Markdown
Understanding how the NaN values in Embarked should be replaced-
###Code
train['Sex'].loc[train['Embarked'] == 'S'].value_counts()
train['Sex'].loc[train['Embarked'] == 'C'].value_counts()
train.loc[train['Cabin'] == 'B28']
train.loc[(train['Embarked'] == 'S') & (train['Survived'] == 1) & (train['Sex'] == 'female')]
train.loc[(train['Embarked'] == 'C') & (train['Survived'] == 1) & (train['Sex'] == 'female')]
print(str(140*100 / 203) + '% chance for a female who embarked at S to survive.')
print(str(64*100 / 73) + '% chance for a female who embarked at C to survive.')
# Filling with S since it's largest
train["Embarked"] = train["Embarked"].fillna("S")
test['Fare'].fillna(test['Fare'].median(), inplace = True)
## Assigning all the null values as "NA"
train['Cabin'].fillna("NA", inplace=True)
test['Cabin'].fillna("NA", inplace=True)
print(train.isnull().sum(), test.isnull().sum())
train["Title"] = pd.Series([i.split(",")[1].split(".")[0].strip() for i in train["Name"]])
train["Title"].head()
test["Title"] = pd.Series([i.split(",")[1].split(".")[0].strip() for i in test["Name"]])
test["Title"].head()
grouped = train.groupby(['Sex','Pclass', 'Title'])
grouped.head()
grouped['Age'].median()
# apply the grouped median value on the Age NaN
train['Age'] = grouped['Age'].apply(lambda x: x.fillna(x.median()))
# Same on test
test_grouped = test.groupby(['Sex','Pclass', 'Title'])
test_grouped['Age'].median()
test['Age'] = test_grouped['Age'].apply(lambda x: x.fillna(x.median()))
print(train.isnull().sum(), test.isnull().sum())
dep_var = 'Survived'
cat_names = ['Title', 'Sex', 'Ticket', 'Cabin', 'Embarked']
cont_names = [ 'Age', 'SibSp', 'Parch', 'Fare']
procs = [FillMissing, Categorify, Normalize]
test = TabularList.from_df(test, cat_names=cat_names, cont_names=cont_names, procs=procs)
data = (TabularList.from_df(train, path='.', cat_names=cat_names, cont_names=cont_names, procs=procs)
.split_by_idx(list(range(0,200)))
.label_from_df(cols = dep_var)
.add_test(test, label=0)
.databunch())
data.show_batch(rows=10)
np.random.seed(40)
learn = tabular_learner(data, layers=[180, 120], metrics=accuracy, emb_drop=0.1)
learn.lr_find()
learn.recorder.plot()
learn.fit(5,slice(1e-01))
learn.recorder.plot_losses()
test_temp = pd.read_csv("input/test.csv")
# Predict our target value
predictions, *_ = learn.get_preds(DatasetType.Test)
labels = np.argmax(predictions, 1)
# create submission file to submit in Kaggle competition
submission = pd.DataFrame({'PassengerId': test_temp['PassengerId'] , 'Survived': labels})
submission.to_csv('submission.csv', index=False)
submission.head()
submission.shape
###Output
_____no_output_____ |
03 Credit Card Fraud Detection/Credit Card Fraud Detection.ipynb | ###Markdown
--- Credit Card Fraud Detection---
Throughout the financial sector, machine learning algorithms are being developed to detect fraudulent transactions. In this project, that is exactly what we are going to be doing as well. Using a dataset of nearly 28,500 credit card transactions and multiple unsupervised anomaly detection algorithms, we are going to identify transactions with a high probability of being credit card fraud. In this project, we will build and deploy the following two machine learning algorithms:
* Local Outlier Factor (LOF)
* Isolation Forest Algorithm
Furthermore, using metrics such as precision, recall, and F1-scores, we will investigate why the classification accuracy for these algorithms can be misleading.
In addition, we will explore the use of data visualization techniques common in data science, such as parameter histograms and correlation matrices, to gain a better understanding of the underlying distribution of data in our data set. Let's get started!
###Markdown
1. Importing Necessary Libraries
###Code
# import the necessary packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
###Output
_____no_output_____
###Markdown
2. Load The Data Set In the following cells, we will import our dataset from a .csv file as a Pandas DataFrame. Furthermore, we will begin exploring the dataset to gain an understanding of the type, quantity, and distribution of data in our dataset. For this purpose, we will use Pandas' built-in describe feature, as well as parameter histograms and a correlation matrix. Download the dataset (creditcard.csv) and place it in the data/ directory before running the next cell.
###Code
# Load the dataset from the csv file using pandas
data = pd.read_csv('data/creditcard.csv')
print(data.shape)
print(data.head())
# Start exploring the dataset
print(data.columns)
# take random rows for data
data = data.sample(frac=0.1, random_state = 1) # frac=0.1 means keep a random 10% sample of the rows
data.head()
# V1 - V28 are the results of a PCA Dimensionality reduction to protect user identities and sensitive features
# Print the shape of the data
print(data.shape)
print(data.describe())
data.isna().sum() # no null value in any columns
data.info() # No null value so we start plotting
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 28481 entries, 169876 to 97365
Data columns (total 31 columns):
Time 28481 non-null float64
V1 28481 non-null float64
V2 28481 non-null float64
V3 28481 non-null float64
V4 28481 non-null float64
V5 28481 non-null float64
V6 28481 non-null float64
V7 28481 non-null float64
V8 28481 non-null float64
V9 28481 non-null float64
V10 28481 non-null float64
V11 28481 non-null float64
V12 28481 non-null float64
V13 28481 non-null float64
V14 28481 non-null float64
V15 28481 non-null float64
V16 28481 non-null float64
V17 28481 non-null float64
V18 28481 non-null float64
V19 28481 non-null float64
V20 28481 non-null float64
V21 28481 non-null float64
V22 28481 non-null float64
V23 28481 non-null float64
V24 28481 non-null float64
V25 28481 non-null float64
V26 28481 non-null float64
V27 28481 non-null float64
V28 28481 non-null float64
Amount 28481 non-null float64
Class 28481 non-null int64
dtypes: float64(30), int64(1)
memory usage: 7.0 MB
###Markdown
Data Visualization
###Code
# Plot histograms of each parameter
data.hist(figsize = (20, 20))
plt.show()
# Determine number of fraud cases in dataset
Fraud = data[data['Class'] == 1]
Valid = data[data['Class'] == 0]
outlier_fraction = len(Fraud)/float(len(Valid))
print(outlier_fraction)
print('Fraud Cases : {}'.format(len(data[data['Class'] == 1])))
print('Valid Transactions: {}'.format(len(data[data['Class'] == 0])))
## Correlation matrix
corrmat=data.corr()
fig=plt.figure(figsize=(36,25))
sns.heatmap(corrmat, vmax = .8, square = True,annot=True,cmap="coolwarm",linewidth=2)
plt.show()
###Output
_____no_output_____
###Markdown
Data processing
###Code
# Get all the columns from the dataFrame
columns = data.columns.tolist()
# print(columns)
print("shape of data",data.shape)
print()
# Filter the columns to remove data we do not want
columns = [c for c in columns if c not in ["Class"]] # remove the Class column because Class is the prediction target
# print(columns)
# Store the variable we'll be predicting on
target = "Class"
X = data[columns] # all the columns data there except class
Y = data[target] # only Class columns data there
# Print shapes
print("X shape : ",X.shape)
print("Y shape : ",Y.shape)
###Output
shape of data (28481, 31)
X shape : (28481, 30)
Y shape : (28481,)
###Markdown
Split data into Train n test datset
###Code
# split data into 80% train and 20% test data
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=1) #0.2 means 20% test n 80% train data
print(X_train.shape)
print(X_test.shape)
print(y_train.shape)
print(y_test.shape)
X_train.head()
X_test.head()
y_train.head()
y_test.head()
###Output
_____no_output_____
###Markdown
Feature Scaling Features measured in different units and magnitudes are rescaled to a common scale so that no single feature dominates the model.
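`StandardScaler`, used in the next cell, applies the usual z-score transformation to each feature:
$$ z = \frac{x - \mu}{\sigma} $$
where $\mu$ and $\sigma$ are the mean and standard deviation of that feature computed on the training set (and reused, unchanged, on the test set).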
###Code
from sklearn.preprocessing import StandardScaler
sc=StandardScaler()
X_train_sc=sc.fit_transform(X_train) # standardize the features (zero mean, unit variance); the result is a float NumPy array
X_test_sc=sc.transform(X_test)
X_test_sc.dtype
###Output
_____no_output_____
###Markdown
Machine Learning Model Building We have clean data to build the ML model, but we still need to find which machine learning algorithm fits this data best. The target is categorical, so we will use supervised classification algorithms. To find the best model, we train and test the dataset with multiple machine learning algorithms and then compare their results. import packages
###Code
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
###Output
_____no_output_____
###Markdown
1. Support vector Classifier
###Code
from sklearn.svm import SVC
svc_classifier=SVC()
svc_classifier.fit(X_train,y_train)
y_pred_svc=svc_classifier.predict(X_test)
accuracy_score(y_test,y_pred_svc)
# Trained With Standard Sclaer data
svc_clf_sc=SVC()
svc_clf_sc.fit(X_train_sc,y_train)
y_pred_svc_sc=svc_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_svc_sc)
###Output
_____no_output_____
###Markdown
2. Logistic Regression
###Code
from sklearn.linear_model import LogisticRegression
lr_clf=LogisticRegression(random_state=1,penalty="l2")
lr_clf.fit(X_train,y_train)
y_pred_lr=lr_clf.predict(X_test)
accuracy_score(y_test,y_pred_lr)
## trained with Standard Scalar data
lr_clf_sc=LogisticRegression(random_state=1,penalty="l2")
lr_clf_sc.fit(X_train_sc,y_train)
y_pred_lr_sc=lr_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_lr_sc)
###Output
_____no_output_____
###Markdown
3. K-Nearest Neighbors Classifier
###Code
# K-Nearest Neighbour Classifier
from sklearn.neighbors import KNeighborsClassifier
knn_clf=KNeighborsClassifier(n_neighbors=3,metric="minkowski",p=1)
knn_clf.fit(X_train,y_train)
y_pred_knn=knn_clf.predict(X_test)
accuracy_score(y_test,y_pred_knn)
# Train with Standard scalar data
knn_clf_sc=KNeighborsClassifier(n_neighbors=3,metric="minkowski",p=1)
knn_clf_sc.fit(X_train_sc,y_train)
y_pred_knn_sc=knn_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_knn_sc)
###Output
_____no_output_____
###Markdown
4. Naive bayes Classifier
###Code
# Naive Bayes Classifier
from sklearn.naive_bayes import GaussianNB
nb_clf=GaussianNB()
nb_clf.fit(X_train,y_train)
y_pred_nb=nb_clf.predict(X_test)
accuracy_score(y_test,y_pred_nb)
# train with Standard Scalar
nb_clf_sc=GaussianNB()
nb_clf_sc.fit(X_train_sc,y_train)
y_pred_nb_sc=nb_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_nb_sc)
###Output
_____no_output_____
###Markdown
5. Decision Tree Classifier
###Code
# Decision tree Classifier
from sklearn.tree import DecisionTreeClassifier
dt_clf=DecisionTreeClassifier(criterion="entropy",random_state=5)
dt_clf.fit(X_train,y_train)
y_pred_dt=dt_clf.predict(X_test)
accuracy_score(y_test,y_pred_dt)
# train with Standard Scalar
dt_clf_sc=DecisionTreeClassifier(criterion="entropy",random_state=5)
dt_clf_sc.fit(X_train_sc,y_train)
y_pred_dt_sc=dt_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_dt_sc)
###Output
_____no_output_____
###Markdown
6. Random Forest Classifier
###Code
# Random forest classifier
from sklearn.ensemble import RandomForestClassifier
rf_clf=RandomForestClassifier(n_estimators=20,criterion="entropy",random_state=5)
rf_clf.fit(X_train,y_train)
y_pred_rf=rf_clf.predict(X_test)
accuracy_score(y_test,y_pred_rf)
# train with standard Scalar
rf_clf_sc=RandomForestClassifier(n_estimators=20,criterion="entropy",random_state=5)
rf_clf_sc.fit(X_train_sc,y_train)
y_pred_rf_sc=rf_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_rf_sc)
###Output
_____no_output_____
###Markdown
7. AdaBoost Classifier
###Code
# Adaboost classifier
from sklearn.ensemble import AdaBoostClassifier
abd_clf=AdaBoostClassifier(DecisionTreeClassifier(criterion="entropy",random_state=20),
n_estimators=200,
learning_rate=0.1,
algorithm="SAMME.R",
random_state=1, )
abd_clf.fit(X_train,y_train)
y_pred_abd=abd_clf.predict(X_test)
accuracy_score(y_test,y_pred_abd)
# Train with Standard Scalar
abd_clf_sc=AdaBoostClassifier(DecisionTreeClassifier(criterion="entropy",random_state=20),
n_estimators=200,
learning_rate=0.1,
algorithm="SAMME.R",
random_state=1,)
abd_clf_sc.fit(X_train_sc,y_train)
y_pred_abd_sc=abd_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_abd_sc)
###Output
_____no_output_____
###Markdown
8. XGBoost Classifier
###Code
from xgboost import XGBClassifier
xgb_clf=XGBClassifier()
xgb_clf.fit(X_train,y_train)
y_pred_xgb=xgb_clf.predict(X_test)
accuracy_score(y_test,y_pred_xgb)
# Train with Standard Scalar
xgb_clf_sc=XGBClassifier()
xgb_clf_sc.fit(X_train_sc,y_train)
y_pred_xgb_sc=xgb_clf_sc.predict(X_test_sc)
accuracy_score(y_test,y_pred_xgb_sc)
###Output
_____no_output_____
###Markdown
XGBoost Parameter Tuning Randomized Search
###Code
# XGBoost classifier most required parameters
params={
"learning_rate" : [0.05, 0.10, 0.15, 0.20, 0.25, 0.30 ] ,
"max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15],
"min_child_weight" : [ 1, 3, 5, 7 ],
"gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ],
"colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7 ]
}
# Randomized Search
from sklearn.model_selection import RandomizedSearchCV
random_search=RandomizedSearchCV(xgb_clf,param_distributions=params,scoring='roc_auc', n_jobs=-1,verbose=3)
random_search.fit(X_train,y_train)
random_search.best_params_
random_search.best_estimator_
# Training XGBoost Classifier with best parameters
xgb_classifier_pt = XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
colsample_bynode=1, colsample_bytree=0.4, gamma=0.2,
learning_rate=0.1, max_delta_step=0, max_depth=15,
min_child_weight=1, missing=None, n_estimators=100, n_jobs=1,
nthread=None, objective='binary:logistic', random_state=0,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
silent=None, subsample=1, verbosity=1)
xgb_classifier_pt.fit(X_train,y_train)
y_pred_xgb_pt=xgb_classifier_pt.predict(X_test)
accuracy_score(y_test,y_pred_xgb_pt)
###Output
_____no_output_____
###Markdown
Confusion Matrix
###Code
cm=confusion_matrix(y_test,y_pred_xgb_pt)
cm
plt.title("heatmap of confusion matrix",fontsize=20)
sns.heatmap(cm,annot=True,cmap="coolwarm")
plt.show()
###Output
_____no_output_____
###Markdown
Classification report of model
###Code
print(classification_report(y_test,y_pred_xgb_pt))
###Output
precision recall f1-score support
0 1.00 1.00 1.00 5691
1 0.83 0.83 0.83 6
accuracy 1.00 5697
macro avg 0.92 0.92 0.92 5697
weighted avg 1.00 1.00 1.00 5697
###Markdown
Define Xgb_model_pt2
###Code
# create xgb_model_pt2 estimator
xgb_model_pt2=XGBClassifier()
xgb_model_pt2.fit(X_train,y_train)
###Output
_____no_output_____
###Markdown
Cross-validation of the ML model
###Code
# Cross validation
from sklearn.model_selection import cross_val_score
cross_validation = cross_val_score(estimator = xgb_model_pt2, X = X_train_sc, y = y_train, cv = 10)
# print("Cross validation of XGBoost model = ",cross_validation)
print("Cross validation of XGBoost model (in mean) = ",cross_validation.mean())
from sklearn.model_selection import cross_val_score
cross_validation = cross_val_score(estimator = xgb_classifier_pt, X = X_train_sc,y = y_train, cv = 10)
print("Cross validation accuracy of XGBoost model = ", cross_validation)
print("\nCross validation mean accuracy of XGBoost model = ", cross_validation.mean())
###Output
Cross validation of XGBoost model (in mean) = 0.9990783120764041
Cross validation accuracy of XGBoost model = [0.99868363 1. 0.99956121 0.99868363 0.99912204 0.99868306
0.99956102 1. 0.99912204 0.99956102]
Cross validation mean accuracy of XGBoost model = 0.99929776433374
###Markdown
Save as Pickle
###Code
## pickle
import pickle
# Save model
pickle.dump(xgb_classifier_pt,open('CreditCard_fraud.pickle','wb'))
# load model
credit_card_fraud_model=pickle.load(open('CreditCard_fraud.pickle','rb'))
# predict the output
y_pred=credit_card_fraud_model.predict(X_test)
# Confusion matrix
print("Confusion matrix of XGBoost model: \n",confusion_matrix(y_test,y_pred),'\n')
# show the accuracy
print("Accuracy of XGBoost model= ",accuracy_score(y_test,y_pred))
###Output
Confusion matrix of XGBoost model:
[[5690 1]
[ 1 5]]
Accuracy of XGBoost model= 0.9996489380375636
###Markdown
We got an accuracy of **99.96%** with the XGBoost model --- 3. Unsupervised Outlier DetectionNow that we have processed our data, we can begin deploying our machine learning algorithms. We will use the following techniques: **Local Outlier Factor (LOF)**The anomaly score of each sample is called Local Outlier Factor. It measures the local deviation of density of a given sample with respect to its neighbors. It is local in that the anomaly score depends on how isolated the object is with respect to the surrounding neighborhood.**Isolation Forest Algorithm**The IsolationForest ‘isolates’ observations by randomly selecting a feature and then randomly selecting a split value between the maximum and minimum values of the selected feature.Since recursive partitioning can be represented by a tree structure, the number of splittings required to isolate a sample is equivalent to the path length from the root node to the terminating node.This path length, averaged over a forest of such random trees, is a measure of normality and our decision function.Random partitioning produces noticeably shorter paths for anomalies. Hence, when a forest of random trees collectively produces shorter path lengths for particular samples, they are highly likely to be anomalies.
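To make the "shorter paths mean anomalies" idea concrete, the Isolation Forest paper scores a point $x$ drawn from a sample of size $n$ as
$$ s(x, n) = 2^{-\frac{E[h(x)]}{c(n)}} $$
where $E[h(x)]$ is the average path length of $x$ over the trees and $c(n)$ is the average path length of an unsuccessful search in a binary search tree, used as a normalizer; scores close to 1 flag likely anomalies, while scores well below 0.5 indicate normal points.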
###Code
from sklearn.metrics import classification_report, accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
# define random states
state = 1
# define outlier detection tools to be compared
classifiers = {
"Isolation Forest": IsolationForest(max_samples=len(X),
contamination=outlier_fraction,
random_state=state),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=20,
contamination=outlier_fraction)}
###Output
_____no_output_____
###Markdown
Fit the model
###Code
plt.figure(figsize=(9, 7))
n_outliers = len(Fraud)
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
# Reshape the prediction values to 0 for valid, 1 for fraud.
y_pred[y_pred == 1] = 0
y_pred[y_pred == -1] = 1
n_errors = (y_pred != Y).sum()
# Run classification metrics
print('{}: {}'.format(clf_name, n_errors))
print("Accuracy score : ",accuracy_score(Y, y_pred))
print()
print(classification_report(Y, y_pred))
###Output
Isolation Forest: 71
Accuracy score : 0.99750711000316
precision recall f1-score support
0 1.00 1.00 1.00 28432
1 0.28 0.29 0.28 49
accuracy 1.00 28481
macro avg 0.64 0.64 0.64 28481
weighted avg 1.00 1.00 1.00 28481
Local Outlier Factor: 97
Accuracy score : 0.9965942207085425
precision recall f1-score support
0 1.00 1.00 1.00 28432
1 0.02 0.02 0.02 49
accuracy 1.00 28481
macro avg 0.51 0.51 0.51 28481
weighted avg 1.00 1.00 1.00 28481
|
jupyter/riddler_2017_05_09.ipynb | ###Markdown
[Original Post on 538](https://fivethirtyeight.com/features/who-will-win-the-lucky-derby/)> Lucky Derby> The Kentucky Derby is on Saturday, and a field of 20 horses is slated to run “the fastest two minutes in sports” in pursuit of the right to be draped with a blanket of roses. But let’s consider, instead, the Lucky Derby, where things are a little more bizarre:> The bugle sounds, and 20 horses make their way to the starting gate for the first annual Lucky Derby. These horses, all trained at the mysterious Riddler Stables, are special. Each second, every Riddler-trained horse takes one step. Each step is exactly one meter long. But what these horses exhibit in precision, they lack in sense of direction. Most of the time, their steps are forward (toward the finish line) but the rest of the time they are backward (away from the finish line). As an avid fan of the Lucky Derby, you’ve done exhaustive research on these 20 competitors. You know that Horse One goes forward 52 percent of the time, Horse Two 54 percent of the time, Horse Three 56 percent, and so on, up to the favorite filly, Horse Twenty, who steps forward 90 percent of the time. The horses’ steps are taken independently of one another, and the finish line is 200 meters from the starting gate.> Handicap this race and place your bets! In other words, what are the odds (a percentage is fine) that each horse wins? SolutionWe will be generating random numbers to simulate results of random chance, so import the necessary python module
###Code
from __future__ import print_function, division
import random
###Output
_____no_output_____
###Markdown
First, we need to create objects to track each horse's attributes, and track position, and determine whether the horse has completed the race. This will make it a bit easier to do bookkeeping, although the object-oriented focus isn't strictly necessary.
###Code
class Horse(object):
def __init__(self, forward_chance, race_length):
# Save the forward chance % into the object so we can use it later
self.forward_chance = forward_chance
# Initialize the horse's distance at the beginning of the race
self.distance = 0
# Save the full distance of the race, so we can determine whether this horse has won
self.race_length = race_length
def take_step(self):
# Generate random number and compare against random chance of moving forward
if random.random() <= self.forward_chance:
self.distance += 1
else:
self.distance -= 1
def finished(self):
# Determine whether horse has moved equal or more than the total length of the race
if self.distance >= self.race_length:
return True
else:
return False
###Output
_____no_output_____
###Markdown
With our `Horse` object created, we can now create all the horses, and run a race by simulating each second and waiting until at least one horse has completed the full length of the race.
###Code
def run_race(length, n_horses=20):
timer = 0 # Just for fun, we can keep track of how long it takes this race to complete
horses = [Horse(0.52 + x * 0.02, length) for x in range(n_horses)]
# Run race until at least one horse has completed
while len([h for h in horses if h.finished()]) == 0:
# Move all the horses
for h in horses:
h.take_step()
# Increment the counter
timer += 1
# Once race is complete, print the winning horse and race duration
winner = [h.forward_chance for h in horses if h.finished()]
return winner, timer
winner, timer = run_race(200)
print(winner, timer)
###Output
[0.9] 250
###Markdown
Now that we can simulate one race, we need to just loop a number of times through the same simulation and keep track of who wins each time.
###Code
# Create a dictionary with each horse's forward percentage, and increment the counter when a horse wins
results = {0.52 + x * 0.02: 0 for x in range(20)}
timers = []
for i in range(100000):
winners, timer = run_race(200)
timers.append(timer)
# There could be ties, so we split up a full win between all winners in this case
for w in winners:
results[w] += 1 / len(winners)
# Print out each horse, and the percentage of all races that they won
for k in sorted(results):
print('%.2f: %.3f%%' % (k, results[k] / sum(results.values()) * 100))
###Output
0.52: 0.000%
0.54: 0.000%
0.56: 0.000%
0.58: 0.000%
0.60: 0.000%
0.62: 0.000%
0.64: 0.000%
0.66: 0.000%
0.68: 0.000%
0.70: 0.000%
0.72: 0.000%
0.74: 0.000%
0.76: 0.000%
0.78: 0.002%
0.80: 0.006%
0.82: 0.116%
0.84: 0.856%
0.86: 5.005%
0.88: 21.833%
0.90: 72.183%
###Markdown
So there we have it. Horses with a forward chance below 78% have less than a 1-in-100,000 shot of winning the race (we didn't observe a single victory for them in our 100,000 simulations), while the favored filly has nearly a 3-in-4 chance to win. Extra Mile The nice thing about having this simulation is that you can tweak the input variables slightly and see how that alters the outcome. What would the percentages have been if we only had the worst 10 horses involved instead of all 20?
###Code
# Create a dictionary with each horse's forward percentage, and increment the counter when a horse wins
results = {0.52 + x * 0.02: 0 for x in range(10)}
timers = []
for i in range(100000):
winners, timer = run_race(200, n_horses=10)
timers.append(timer)
# There could be ties, so we split up a full win between all winners in this case
for w in winners:
results[w] += 1 / len(winners)
# Print out each horse, and the percentage of all races that they won
for k in sorted(results):
print('%.2f: %.3f%%' % (k, results[k] / sum(results.values()) * 100))
###Output
0.52: 0.000%
0.54: 0.000%
0.56: 0.000%
0.58: 0.000%
0.60: 0.001%
0.62: 0.041%
0.64: 0.667%
0.66: 4.833%
0.68: 22.451%
0.70: 72.006%
###Markdown
The results here are nearly identical to those above, with the top horse winning roughly 72% of the time, the next winning 22% of the time, and the third winning 5% of the time, with the others picking up smaller shares. It seems as though only the top six or seven horses win more than once in 100,000 races, regardless of the size of the field. Now what happens if we change the length of the race to 50m instead of 200m? Because the higher forward percentage should compound as the race gets longer, shortening the challenge might alter the winning percentages. Our simulation can check.
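As a rough back-of-the-envelope guide (a simple random-walk argument): a horse with forward probability $p$ drifts toward the finish at $2p-1$ meters per second, so it needs about $L/(2p-1)$ seconds on average to cover a course of length $L$; for the 90% horse that is $200/0.8 = 250$ seconds, which matches the single race we ran above. The random spread around that drift only grows like $\sqrt{t}$, so shortening the race shrinks the gap between horses' expected positions faster than it shrinks the noise, and the longshots should pick up a few more wins over 50 meters.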
###Code
# Create a dictionary with each horse's forward percentage, and increment the counter when a horse wins
results = {0.52 + x * 0.02: 0 for x in range(20)}
timers = []
for i in range(100000):
winners, timer = run_race(50, n_horses=20)
timers.append(timer)
# There could be ties, so we split up a full win between all winners in this case
for w in winners:
results[w] += 1 / len(winners)
# Print out each horse, and the percentage of all races that they won
for k in sorted(results):
print('%.2f: %.3f%%' % (k, results[k] / sum(results.values()) * 100))
###Output
0.52: 0.000%
0.54: 0.000%
0.56: 0.000%
0.58: 0.000%
0.60: 0.000%
0.62: 0.000%
0.64: 0.000%
0.66: 0.001%
0.68: 0.001%
0.70: 0.006%
0.72: 0.024%
0.74: 0.078%
0.76: 0.231%
0.78: 0.553%
0.80: 1.392%
0.82: 3.212%
0.84: 6.668%
0.86: 13.652%
0.88: 25.802%
0.90: 48.379%
|
FailurePrediction/VariableRotationalSpeed/MachineLearningModels/RandomForest_360_traintest.ipynb | ###Markdown
Random Forest
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.colors import ListedColormap
import matplotlib.patches as mpatches
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.ensemble import RandomForestClassifier
import sklearn.externals
import joblib
df_train = pd.read_csv("statistics_10_train.csv" , sep = ',')
df_test = pd.read_csv("statistics_10_test.csv" , sep = ',')
X_train = df_train[['Kurtosis', 'Impulse factor', 'RMS', 'Margin factor', 'Skewness',
'Shape factor', 'Peak to peak', 'Crest factor']].values
y_train = df_train['Tipo'].values
X_test = df_test[['Kurtosis', 'Impulse factor', 'RMS', 'Margin factor', 'Skewness',
'Shape factor', 'Peak to peak', 'Crest factor']].values
y_test = df_test['Tipo'].values
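# Sweep the forest size from 1 to 100 trees and record train/test accuracy for each setting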
arr_estimators = range(1, 101)
scores_train = []
scores_test = []
for i in arr_estimators:
randomForest = RandomForestClassifier(random_state=0, n_estimators = i, min_samples_split = 2, min_samples_leaf = 1)
randomForest.fit(X_train, y_train)
scores_train.append(randomForest.score(X_train, y_train))
scores_test.append(randomForest.score(X_test, y_test))
if (i % 10 == 0 or i == 1):
print('----- n trees: ' + str(i) + '----- Accuracy test: ' + str(scores_test[i - 1]) + '-----')
plt.figure()
plt.xlabel('n_trees')
plt.ylabel('Accuracy')
plt.plot(arr_estimators, scores_train, label = 'Train')
plt.plot(arr_estimators, scores_test, label = 'Test')
plt.legend()
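# Refit with 90 trees (a good point on the accuracy curve above) and evaluate on both splits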
randomForest = RandomForestClassifier(random_state=0, n_estimators = 90, min_samples_split = 2, min_samples_leaf = 1)
randomForest.fit(X_train, y_train)
target_names = ['Inner', 'Outer', 'Healthy']
pred = randomForest.predict(X_test)
print(confusion_matrix(y_test, pred))
print(classification_report(y_test, pred, target_names = target_names))
pred_train = randomForest.predict(X_train)
print(confusion_matrix(y_train, pred_train))
print(classification_report(y_train, pred_train, target_names = target_names))
sns.set()
mat = confusion_matrix(y_test, pred)
fig, ax = plt.subplots(figsize=(7,6))
sns.set(font_scale=1.3)
sns.heatmap(mat.T, square=False, annot=True, fmt='d', cbar=False,
xticklabels=['Inner', 'Outer', 'Healthy'],
yticklabels=['Inner', 'Outer', 'Healthy'],
cmap=sns.cubehelix_palette(light=1, as_cmap=True))
plt.xlabel('True label');
plt.ylabel('Predicted label');
joblib.dump(randomForest, 'randomForest_traintest_trained.pkl') # Save the trained model.
###Output
_____no_output_____ |
Analysis/GridGratingDrawing/2021-05-12-GridGratingDrawing.ipynb | ###Markdown
We depend on the sync_lib library, which lives one folder above this notebook
###Code
import sys
sys.path.append('../')
from sync_lib import Dataset
import matplotlib.pylab as plt
import numpy as np
###Output
_____no_output_____
###Markdown
Path to relevant sync file
###Code
path = "2021T134531.h5"
dset = Dataset(path)
###Output
/Users/jeromel/anaconda2/envs/deep_work/lib/python3.6/site-packages/h5py/_hl/dataset.py:313: H5pyDeprecationWarning: dataset.value has been deprecated. Use dataset[()] instead.
"Use dataset[()] instead.", H5pyDeprecationWarning)
###Markdown
Here we plot the period of the stimulus rendering, of the driving photodiode signal, and of the measured photodiode response after each screen flip
###Code
# This is the fastest output from bonsai to digital line
times_bonsai_fast_sync = dset.get_rising_edges('vsync_stim', units='sec')
# This is the driving signal behind the photodiode
times_bonsai_driving_photodiode = dset.get_rising_edges(
'stim_running', units='sec')
times_photodiode = dset.get_rising_edges('stim_photodiode', units='sec')
plt.subplot(3, 1, 1)
plt.plot(times_bonsai_fast_sync[2:], np.diff(np.diff(times_bonsai_fast_sync)))
plt.ylabel('Period (s)')
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.title('Stimulus rendering')
plt.subplot(3, 1, 2)
plt.plot(times_bonsai_driving_photodiode[1:], np.diff(
times_bonsai_driving_photodiode))
plt.ylabel('Period (s)')
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.title('Driving photo diode signal')
plt.subplot(3, 1, 3)
y_axis_data = np.diff(
times_photodiode)
plt.plot(times_photodiode[1:], y_axis_data)
plt.xlabel('Time from start (s)')
plt.ylabel('Period (s)')
plt.title('Measured photo diode signal')
plt.gca().spines['right'].set_visible(False)
plt.gca().spines['top'].set_visible(False)
plt.tight_layout()
plt.ylim([0,2])
###Output
_____no_output_____
###Markdown
We want to replicate the plot from the BonVision paper of frames/second vs. number of elements. For this we need to extract each section of the grid-grating stimulus
###Code
times_photodiode.shape[0]
###Output
_____no_output_____
###Markdown
These values come from the Bonsai workflow
###Code
grid_size = np.array([1,2,3,4,6,8,12,16,24,32,48,64])
nb_flips_per_grid = 24
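# defined by the Bonsai workflow: 12 grid sizes are shown in sequence, with 24 photodiode flips per grid size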
list_periods = []
local_list = []
previous_time = None
for index, local_time in enumerate(times_photodiode):
if previous_time is not None:
local_period = local_time - previous_time
if local_period < 2:
local_list.append(local_period)
previous_time = local_time
else:
# a gap longer than 2 s marks the transition to the next grid size
list_periods.append(np.mean(local_list))
local_list = []
previous_time = None
else:
previous_time = local_time
plt.plot([str(x**2) for x in grid_size], 1/np.array(list_periods), 'r')
plt.xlabel('Number of gratings displayed')
plt.ylabel('Frames / second')
plt.savefig('2021-05-12-BonVision_grating_replication.png')
###Output
_____no_output_____
|
Invoice_to_BigQuery.ipynb | ###Markdown
 Document AI : Saving Invoice to BigQuery Prerequisites
###Code
# !sudo apt-get install -y poppler-utils # need for pdfinfo command
!cat requirements.txt
# !pip install -r requirements.txt
from google.cloud import documentai_v1beta3 as documentai
from google.cloud import bigquery
from wand.image import Image as WImage
import pandas as pd
PROJECT_ID = 'doc-ai-ce'
PROCESSOR_ID = 'e38c82311d145f3b'
LOCATION = 'us'
###Output
_____no_output_____
###Markdown
Download Invoice More Sample Invoices: https://github.com/GoogleCloudPlatform/documentai-notebooks/tree/master/resources/procurement/invoices
###Code
!mkdir -p pdf_samples
!gsutil cp gs://cloud-samples-data/documentai/*invoice* ./pdf_samples/
INVOICE_PDF = './pdf_samples/fake_invoice.pdf'
WImage(filename=INVOICE_PDF, resolution=70)
###Output
_____no_output_____
###Markdown
Process Invoice 1. Call Document AI
###Code
%%time
processor_name = f'projects/{PROJECT_ID}/locations/{LOCATION}/processors/{PROCESSOR_ID}'
with open(INVOICE_PDF, 'rb') as image:
document = {'content': image.read(), 'mime_type': 'application/pdf'}
request = {'name': processor_name, 'document': document}
results = documentai.DocumentProcessorServiceClient().process_document(request=request)
###Output
CPU times: user 32.8 ms, sys: 8.24 ms, total: 41 ms
Wall time: 2.53 s
###Markdown
2. Gather Entities
###Code
results_frame = [[entity.type_, entity.mention_text, round(entity.confidence, 4)] for entity in results.document.entities]
df = pd.DataFrame(results_frame, columns=['type', 'value','confidence'])
df
###Output
_____no_output_____
###Markdown
3. Transform Data
###Code
df_t = df.rename(columns={'type':'index'}).drop(columns=['confidence']).T
df_t.columns = df_t.iloc[0]
df_t = df_t.drop(df_t.index[0])
df_t = df_t.reset_index(drop=True)
df_t = df_t[['invoice_id','purchase_order','due_date'] + [col for col in df_t.columns if '_amount' in col]]
# transform date column
df_t['due_date'] = pd.to_datetime(df_t['due_date'])
# transform amount columns
for num_col in [col for col in df_t.columns if '_amount' in col]:
df_t[num_col] = pd.to_numeric(df_t[num_col].replace({'\$':'', ',':''}, regex = True))
keeper_cols = ['invoice_id', 'purchase_order', 'due_date', 'total_tax_amount', 'freight_amount', 'net_amount', 'total_amount']
df_t = df_t[keeper_cols]
df_t
###Output
_____no_output_____
###Markdown
Save to BigQuery 1. Create BigQuery Dataset The invoice rows below are written to a BigQuery dataset named `dai` (see the `DATASET` variable below), so that dataset must exist in the project first. Create it in the BigQuery console, or programmatically as in the hedged sketch below.
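###Code
# Hypothetical sketch (not taken from the original notebook): create the `dai`
# dataset with the same BigQuery client; assumes the dataset may not exist yet.
from google.cloud import bigquery
bq = bigquery.Client(project=PROJECT_ID)
dataset = bigquery.Dataset(f'{PROJECT_ID}.dai')
dataset.location = 'US'  # assumption: US multi-region
bq.create_dataset(dataset, exists_ok=True)
###Output
_____no_output_____
###Markdown
2. Insert Invoice into BigQuery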
###Code
DATASET = 'dai'
TABLE = 'invoice'
bq = bigquery.Client(project=PROJECT_ID)
bq.delete_table(f'{DATASET}.{TABLE}', not_found_ok=True)
schema=[bigquery.SchemaField('invoice_id', 'STRING'),
bigquery.SchemaField('purchase_order', 'STRING'),
bigquery.SchemaField('due_date', 'TIMESTAMP'),
bigquery.SchemaField('total_tax_amount', 'FLOAT'),
bigquery.SchemaField('freight_amount', 'FLOAT'),
bigquery.SchemaField('net_amount', 'FLOAT'),
bigquery.SchemaField('total_amount', 'FLOAT')]
job_config = bigquery.LoadJobConfig(schema=schema)
job = bq.load_table_from_dataframe(df_t, f'{DATASET}.{TABLE}', job_config=job_config)
job.result().state
###Output
_____no_output_____
###Markdown
3. Verify Data Query back the newly inserted data: https://pantheon.corp.google.com/bigquery?project=doc-ai-ce&p=doc-ai-ce&d=dai&t=invoice&page=table
###Code
bq.query(f'''
SELECT
invoice_id,
purchase_order,
cast(due_date AS DATE) due_date,
net_amount,
total_amount,
total_tax_amount,
freight_amount
FROM
{DATASET}.{TABLE}''').to_dataframe()
###Output
_____no_output_____
###Markdown
Another Invoice Now process and save another invoice, all at once.
###Code
INVOICE_PDF = './pdf_samples/invoice.pdf'
WImage(filename=INVOICE_PDF, resolution=70)
with open(INVOICE_PDF, 'rb') as image:
document = {'content': image.read(), 'mime_type': 'application/pdf'}
request = {'name': processor_name, 'document': document}
results = documentai.DocumentProcessorServiceClient().process_document(request=request)
results_frame = [[entity.type_, entity.mention_text, round(entity.confidence, 4)] for entity in results.document.entities]
df = pd.DataFrame(results_frame, columns=['type', 'value','confidence'])
df
df_t = df.rename(columns={'type':'index'}).drop(columns=['confidence']).T
df_t.columns = df_t.iloc[0]
df_t = df_t.drop(df_t.index[0])
df_t = df_t.reset_index(drop=True)
df_t = df_t[['invoice_id','purchase_order','due_date'] + [col for col in df_t.columns if '_amount' in col]]
# transform date column
df_t['due_date'] = pd.to_datetime(df_t['due_date'])
# transform amount columns
for num_col in [col for col in df_t.columns if '_amount' in col]:
df_t[num_col] = pd.to_numeric(df_t[num_col].replace({'\$':'', ',':''}, regex = True))
df_t
bq.insert_rows_from_dataframe(table=f'{DATASET}.{TABLE}', dataframe=df_t, selected_fields = schema)
bq.query(f'''
SELECT
invoice_id,
purchase_order,
cast(due_date AS DATE) due_date,
net_amount,
total_amount,
total_tax_amount,
freight_amount
FROM
{DATASET}.{TABLE}''').to_dataframe()
###Output
_____no_output_____ |
codes/Neural_Prophet_Experiment.ipynb | ###Markdown
Dacon Bitcoin Price Prediction - Prophet
###Code
import os, datetime
import numpy as np
import pandas as pd
from tqdm import tqdm
import IPython
import IPython.display
import matplotlib.pyplot as plt
from neuralprophet import NeuralProphet
import preprocessor, coin_simulation
# modeling programing
def neural_prophet_modeling(input_array):
''' 함수 설명 : prophet fitting & prediction'''
#미래 데이터 저장을 위한 빈 array 생성
valid_pred_array = np.zeros([input_array.shape[0], 120])
error_counter = 0
#모델 돌리기 및 결과 저장
for idx in tqdm(range(input_array.shape[0])):
try:
x_series = input_array[idx,:].reshape(-1)
x_df = prophet_preprocessor(x_series)
model = NeuralProphet(
n_changepoints = 20,
d_hidden = 30,
changepoints_range = 0.95,
num_hidden_layers = 1,
learning_rate = 0.1, epochs=40, batch_size = 32, loss_func="Huber",
seasonality_mode = 'multiplicative',
yearly_seasonality = False, weekly_seasonality = False, daily_seasonality = False,
normalize='off' # Type of normalization ('minmax', 'standardize', 'soft', 'off')
)
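# custom intraday seasonalities; period is given in days, so 1/24 is roughly an hourly cycle and 1/12 a two-hour cycle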
model.add_seasonality(name='first_seasonality', period=1/24, fourier_order= 7 )
model.add_seasonality(name='second_seasonality', period=1/12, fourier_order= 15)
metrics = model.fit(x_df, freq="min")
future = model.make_future_dataframe(x_df, periods=120)
forecast = model.predict(future)
valid_pred_array[idx,:] = forecast.yhat1.values[-120:]
IPython.display.clear_output()
except:
error_counter += 1
print(f'Neural Prophet modeling error!')
IPython.display.clear_output()
pass
# clear display
IPython.display.clear_output()
print(f'''
NOTE : errors occurred in {error_counter} of the {len(input_array)} samples.\n
Prediction Complete!
'''
)
return valid_pred_array
def prophet_preprocessor(x_series):
'''Function description: build an empty x_df'''
# start time initialization
start_time = '2021-01-01 00:00:00'
start_dt = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
# build the dataframe
x_df = pd.DataFrame()
# per-minute timestamp series
x_df['ds'] = [start_dt + datetime.timedelta(minutes = time_min) for time_min in np.arange(1, x_series.shape[0]+1).tolist()]
# price series
x_df['y'] = x_series.tolist()
return x_df
### ------------ Data upload part ---------------- ###
# folder containing the data
dir_path = './data'
# file names
x_file_name, y_file_name = 'train_x_df.csv', 'train_y_df.csv'
x_file_path = os.path.join(dir_path, x_file_name)
y_file_path = os.path.join(dir_path, y_file_name)
# load the files
train_x_df = pd.read_csv(x_file_path)
train_y_df = pd.read_csv(y_file_path)
print("Reading Complete!")
### --------------- Modeling part ---------------- ###
# Data preprocessing 1: dataframe to array
train_x_array = preprocessor.df2d_to_array3d(train_x_df)
train_y_array = preprocessor.df2d_to_array3d(train_y_df)
# Data preprocessing 2: fractional differencing (FFD)
FFD_train_x_array = preprocessor.FFD_smoothing(train_x_array) # automatically keeps only 383 samples
# Data preprocessing 2-2: extract the non-differenced open data
# normal_x_array = train_x_array[:383, :, 1].reshape(383, 1380, 1) # open col is 1
print("Preprocessing Complete!")
# start modeling
valid_pred_array = neural_prophet_modeling(FFD_train_x_array)
save_file_name = 'FFD_neural_prophet_result2.csv'
np.savetxt(save_file_name, valid_pred_array, delimiter = ",")
import profit_function
# arguments : pred array, start_idx, increase_rate
valid_submission = profit_function.array_to_submission(valid_pred_array, start_idx = 0, increase_rate = 1.01)
valid_y_array = train_y_array[:383, :, 1]
total_money, total_money_list = profit_function.COIN(y_array=valid_y_array, submission=valid_submission)
print(total_money)
plt.plot(total_money_list)
plt.title(total_money)
plt.show()
###Output
11406.797940622964
|
Copia_de_inference_playground_mp4.ipynb | ###Markdown
SAM: Animation Inference Playground
###Code
import os
os.chdir('/content')
CODE_DIR = 'SAM'
!git clone https://github.com/yuval-alaluf/SAM.git $CODE_DIR
!wget https://github.com/ninja-build/ninja/releases/download/v1.8.2/ninja-linux.zip
!sudo unzip ninja-linux.zip -d /usr/local/bin/
!sudo update-alternatives --install /usr/bin/ninja ninja /usr/local/bin/ninja 1 --force
os.chdir(f'./{CODE_DIR}')
from argparse import Namespace
import os
import sys
import pprint
import numpy as np
from PIL import Image
import torch
import torchvision.transforms as transforms
sys.path.append(".")
sys.path.append("..")
from datasets.augmentations import AgeTransformer
from utils.common import tensor2im
from models.psp import pSp
EXPERIMENT_TYPE = 'ffhq_aging'
###Output
_____no_output_____
###Markdown
Step 1: Download Pretrained Model As part of this repository, we provide our pretrained aging model. We'll download the model for the selected experiment and save it to the folder `../pretrained_models`.
###Code
def get_download_model_command(file_id, file_name):
""" Get wget download command for downloading the desired model and save to directory ../pretrained_models. """
current_directory = os.getcwd()
save_path = os.path.join(os.path.dirname(current_directory), "pretrained_models")
if not os.path.exists(save_path):
os.makedirs(save_path)
url = r"""wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id={FILE_ID}' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id={FILE_ID}" -O {SAVE_PATH}/{FILE_NAME} && rm -rf /tmp/cookies.txt""".format(FILE_ID=file_id, FILE_NAME=file_name, SAVE_PATH=save_path)
return url
MODEL_PATHS = {
"ffhq_aging": {"id": "1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC", "name": "sam_ffhq_aging.pt"}
}
path = MODEL_PATHS[EXPERIMENT_TYPE]
download_command = get_download_model_command(file_id=path["id"], file_name=path["name"])
!wget {download_command}
###Output
--2021-02-16 05:34:10-- http://wget/
Resolving wget (wget)... failed: Name or service not known.
wget: unable to resolve host address ‘wget’
--2021-02-16 05:34:10-- https://docs.google.com/uc?export=download&confirm=i59R&id=1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC
Resolving docs.google.com (docs.google.com)... 74.125.20.138, 74.125.20.101, 74.125.20.100, ...
Connecting to docs.google.com (docs.google.com)|74.125.20.138|:443... connected.
HTTP request sent, awaiting response... 302 Moved Temporarily
Location: https://doc-08-7g-docs.googleusercontent.com/docs/securesc/3288s4o1us02iiims1id6qrftql3lq11/r12uc4jdkq5fsoice2hhek2qiuko5ap2/1613453625000/05457687429326987275/17328170664508509099Z/1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC?e=download [following]
--2021-02-16 05:34:10-- https://doc-08-7g-docs.googleusercontent.com/docs/securesc/3288s4o1us02iiims1id6qrftql3lq11/r12uc4jdkq5fsoice2hhek2qiuko5ap2/1613453625000/05457687429326987275/17328170664508509099Z/1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC?e=download
Resolving doc-08-7g-docs.googleusercontent.com (doc-08-7g-docs.googleusercontent.com)... 74.125.20.132, 2607:f8b0:400e:c07::84
Connecting to doc-08-7g-docs.googleusercontent.com (doc-08-7g-docs.googleusercontent.com)|74.125.20.132|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://docs.google.com/nonceSigner?nonce=fo2q0ci0tcjhs&continue=https://doc-08-7g-docs.googleusercontent.com/docs/securesc/3288s4o1us02iiims1id6qrftql3lq11/r12uc4jdkq5fsoice2hhek2qiuko5ap2/1613453625000/05457687429326987275/17328170664508509099Z/1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC?e%3Ddownload&hash=6laq9m5irdgrnvl37mlmpjq7l3ibvem2 [following]
--2021-02-16 05:34:10-- https://docs.google.com/nonceSigner?nonce=fo2q0ci0tcjhs&continue=https://doc-08-7g-docs.googleusercontent.com/docs/securesc/3288s4o1us02iiims1id6qrftql3lq11/r12uc4jdkq5fsoice2hhek2qiuko5ap2/1613453625000/05457687429326987275/17328170664508509099Z/1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC?e%3Ddownload&hash=6laq9m5irdgrnvl37mlmpjq7l3ibvem2
Connecting to docs.google.com (docs.google.com)|74.125.20.138|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://doc-08-7g-docs.googleusercontent.com/docs/securesc/3288s4o1us02iiims1id6qrftql3lq11/r12uc4jdkq5fsoice2hhek2qiuko5ap2/1613453625000/05457687429326987275/17328170664508509099Z/1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC?e=download&nonce=fo2q0ci0tcjhs&user=17328170664508509099Z&hash=ovsniob71v7eck566tdnv991rrg23491 [following]
--2021-02-16 05:34:10-- https://doc-08-7g-docs.googleusercontent.com/docs/securesc/3288s4o1us02iiims1id6qrftql3lq11/r12uc4jdkq5fsoice2hhek2qiuko5ap2/1613453625000/05457687429326987275/17328170664508509099Z/1XyumF6_fdAxFmxpFcmPf-q84LU_22EMC?e=download&nonce=fo2q0ci0tcjhs&user=17328170664508509099Z&hash=ovsniob71v7eck566tdnv991rrg23491
Connecting to doc-08-7g-docs.googleusercontent.com (doc-08-7g-docs.googleusercontent.com)|74.125.20.132|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: unspecified [application/x-zip]
Saving to: ‘/content/pretrained_models/sam_ffhq_aging.pt’
/content/pretrained [ <=> ] 2.11G 77.6MB/s in 29s
2021-02-16 05:34:39 (75.2 MB/s) - ‘/content/pretrained_models/sam_ffhq_aging.pt’ saved [2270547237]
FINISHED --2021-02-16 05:34:39--
Total wall clock time: 30s
Downloaded: 1 files, 2.1G in 29s (75.2 MB/s)
###Markdown
Step 3: Define Inference Parameters Below we have a dictionary defining parameters such as the path to the pretrained model to use and the path to the image to perform inference on. While we provide default values to run this script, feel free to change them as needed.
###Code
EXPERIMENT_DATA_ARGS = {
"ffhq_aging": {
"model_path": "../pretrained_models/sam_ffhq_aging.pt",
"transform": transforms.Compose([
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
}
}
EXPERIMENT_ARGS = EXPERIMENT_DATA_ARGS[EXPERIMENT_TYPE]
###Output
_____no_output_____
###Markdown
Step 4: Load Pretrained Model We assume that you have downloaded the pretrained aging model and placed it in the path defined above.
###Code
model_path = EXPERIMENT_ARGS['model_path']
ckpt = torch.load(model_path, map_location='cpu')
opts = ckpt['opts']
pprint.pprint(opts)
# update the training options
opts['checkpoint_path'] = model_path
opts = Namespace(**opts)
net = pSp(opts)
net.eval()
net.cuda()
print('Model successfully loaded!')
###Output
Loading SAM from checkpoint: ../pretrained_models/sam_ffhq_aging.pt
Model successfully loaded!
###Markdown
Utils for Generating MP4
###Code
import imageio
from tqdm import tqdm
import matplotlib
matplotlib.use('module://ipykernel.pylab.backend_inline')
%matplotlib inline
def generate_mp4(out_name, images, kwargs):
writer = imageio.get_writer(out_name + '.mp4', **kwargs)
for image in images:
writer.append_data(image)
writer.close()
def run_on_batch_to_vecs(inputs, net):
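# run the age-conditioned batch through SAM and return the corresponding latent vectors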
_, result_batch = net(inputs.to("cuda").float(), return_latents=True, randomize_noise=False, resize=False)
return result_batch.cpu()
def get_result_from_vecs(vectors_a, vectors_b, alpha):
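# linearly interpolate between the two sets of latent codes with weight alpha and decode each blend through the generator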
results = []
for i in range(len(vectors_a)):
cur_vec = vectors_b[i] * alpha + vectors_a[i] * (1 - alpha)
res = net(cur_vec.cuda(), randomize_noise=False, input_code=True, input_is_full=True, resize=False)
results.append(res[0])
return results
SEED = 42
np.random.seed(SEED)
img_transforms = EXPERIMENT_ARGS['transform']
n_transition = 25
kwargs = {'fps': 40}
save_path = "notebooks/animations"
os.makedirs(save_path, exist_ok=True)
#################################################################
# TODO: define your image paths here to be fed into the model
#################################################################
root_dir = 'notebooks/images'
ims = ['866', '1287', '2468']
im_paths = [os.path.join(root_dir, im) + '.jpg' for im in ims]
# NOTE: Please make sure the images are pre-aligned!
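# target ages ramp from 0 up to 100 and back down so the animation ages forward and then back again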
target_ages = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 90, 80, 70, 60, 50, 40, 30, 20, 10, 0]
age_transformers = [AgeTransformer(target_age=age) for age in target_ages]
for image_path in im_paths:
image_name = os.path.basename(image_path)
print(f'Working on image: {image_name}')
original_image = Image.open(image_path).convert("RGB")
input_image = img_transforms(original_image)
all_vecs = []
for idx, age_transformer in enumerate(age_transformers):
input_age_batch = [age_transformer(input_image.cpu()).to('cuda')]
input_age_batch = torch.stack(input_age_batch)
# get latent vector for the current target age amount
with torch.no_grad():
result_vec = run_on_batch_to_vecs(input_age_batch, net)
result_image = get_result_from_vecs([result_vec],result_vec,0)[0]
all_vecs.append([result_vec])
images = []
for i in range(1, len(target_ages)):
alpha_vals = np.linspace(0, 1, n_transition).tolist()
for alpha in tqdm(alpha_vals):
result_image = get_result_from_vecs(all_vecs[i-1], all_vecs[i], alpha)[0]
output_im = tensor2im(result_image)
images.append(np.array(output_im))
animation_path = os.path.join(save_path, f"{image_name}_animation")
generate_mp4(animation_path, images, kwargs)
###Output
Working on image: 866.jpg
|